From fa2f173aacca8e53a55d5f2f4cc2aa9df8314190 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Wed, 22 Oct 2025 17:10:33 +0100 Subject: [PATCH 01/23] chore(storage): remove `UnifiedStorageWriterError` (#19210) --- crates/storage/errors/src/lib.rs | 3 --- crates/storage/errors/src/provider.rs | 5 +---- crates/storage/errors/src/writer.rs | 24 ------------------------ 3 files changed, 1 insertion(+), 31 deletions(-) delete mode 100644 crates/storage/errors/src/writer.rs diff --git a/crates/storage/errors/src/lib.rs b/crates/storage/errors/src/lib.rs index 1a09d745140..eca6cd47a45 100644 --- a/crates/storage/errors/src/lib.rs +++ b/crates/storage/errors/src/lib.rs @@ -21,8 +21,5 @@ pub mod lockfile; pub mod provider; pub use provider::{ProviderError, ProviderResult}; -/// Writer error -pub mod writer; - /// Any error pub mod any; diff --git a/crates/storage/errors/src/provider.rs b/crates/storage/errors/src/provider.rs index 9630a1b2a64..ed5230c18fb 100644 --- a/crates/storage/errors/src/provider.rs +++ b/crates/storage/errors/src/provider.rs @@ -1,4 +1,4 @@ -use crate::{any::AnyError, db::DatabaseError, writer::UnifiedStorageWriterError}; +use crate::{any::AnyError, db::DatabaseError}; use alloc::{boxed::Box, string::String}; use alloy_eips::{BlockHashOrNumber, HashOrNumber}; use alloy_primitives::{Address, BlockHash, BlockNumber, TxNumber, B256}; @@ -125,9 +125,6 @@ pub enum ProviderError { /// Consistent view error. #[error("failed to initialize consistent view: {_0}")] ConsistentView(Box), - /// Storage writer error. - #[error(transparent)] - UnifiedStorageWriterError(#[from] UnifiedStorageWriterError), /// Received invalid output from configured storage implementation. 
#[error("received invalid output from storage")] InvalidStorageOutput, diff --git a/crates/storage/errors/src/writer.rs b/crates/storage/errors/src/writer.rs deleted file mode 100644 index 52a5ba06e5e..00000000000 --- a/crates/storage/errors/src/writer.rs +++ /dev/null @@ -1,24 +0,0 @@ -use crate::db::DatabaseError; -use reth_static_file_types::StaticFileSegment; - -/// `UnifiedStorageWriter` related errors -#[derive(Clone, Debug, derive_more::Display, PartialEq, Eq, derive_more::Error)] -pub enum UnifiedStorageWriterError { - /// Database writer is missing - #[display("Database writer is missing")] - MissingDatabaseWriter, - /// Static file writer is missing - #[display("Static file writer is missing")] - MissingStaticFileWriter, - /// Static file writer is of wrong segment - #[display("Static file writer is of wrong segment: got {_0}, expected {_1}")] - IncorrectStaticFileWriter(StaticFileSegment, StaticFileSegment), - /// Database-related errors. - Database(DatabaseError), -} - -impl From for UnifiedStorageWriterError { - fn from(error: DatabaseError) -> Self { - Self::Database(error) - } -} From 8119045258eaa6948080c9fae67de52d488c83ea Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Wed, 22 Oct 2025 18:29:55 +0200 Subject: [PATCH 02/23] chore(e2e): relax bounds (#19231) --- crates/e2e-test-utils/src/lib.rs | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index e7b83cb3ad9..57d03f70fa5 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -8,8 +8,8 @@ use reth_network_api::test_utils::PeersHandleProvider; use reth_node_builder::{ components::NodeComponentsBuilder, rpc::{EngineValidatorAddOn, RethRpcAddOns}, - FullNodeTypesAdapter, Node, NodeAdapter, NodeComponents, NodePrimitives, NodeTypes, - NodeTypesWithDBAdapter, PayloadAttributesBuilder, PayloadTypes, + FullNodeTypesAdapter, Node, NodeAdapter, NodeComponents, 
NodeTypes, NodeTypesWithDBAdapter, + PayloadAttributesBuilder, PayloadTypes, }; use reth_provider::providers::{BlockchainProvider, NodeTypesForProvider}; use reth_tasks::TaskManager; @@ -146,12 +146,6 @@ where >, > + Node< TmpNodeAdapter>>, - Primitives: NodePrimitives< - BlockHeader = alloy_consensus::Header, - BlockBody = alloy_consensus::BlockBody< - ::SignedTx, - >, - >, ComponentsBuilder: NodeComponentsBuilder< TmpNodeAdapter>>, Components: NodeComponents< @@ -180,12 +174,6 @@ where >, > + Node< TmpNodeAdapter>>, - Primitives: NodePrimitives< - BlockHeader = alloy_consensus::Header, - BlockBody = alloy_consensus::BlockBody< - ::SignedTx, - >, - >, ComponentsBuilder: NodeComponentsBuilder< TmpNodeAdapter>>, Components: NodeComponents< From 1972ec0949df5d75894c42731c9ee9c2c6b8e8bc Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 22 Oct 2025 12:33:54 -0400 Subject: [PATCH 03/23] revert: "fix(engine): flatten storage cache (#18880)" (#19235) --- crates/engine/tree/src/tree/cached_state.rs | 143 ++++++++++++-------- 1 file changed, 89 insertions(+), 54 deletions(-) diff --git a/crates/engine/tree/src/tree/cached_state.rs b/crates/engine/tree/src/tree/cached_state.rs index c1bb028cab2..bc543d067a0 100644 --- a/crates/engine/tree/src/tree/cached_state.rs +++ b/crates/engine/tree/src/tree/cached_state.rs @@ -1,8 +1,5 @@ //! Execution cache implementation for block processing. 
-use alloy_primitives::{ - map::{DefaultHashBuilder, HashSet}, - Address, StorageKey, StorageValue, B256, -}; +use alloy_primitives::{Address, StorageKey, StorageValue, B256}; use metrics::Gauge; use mini_moka::sync::CacheBuilder; use reth_errors::ProviderResult; @@ -17,6 +14,7 @@ use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, MultiProofTargets, StorageMultiProof, StorageProof, TrieInput, }; +use revm_primitives::map::DefaultHashBuilder; use std::{sync::Arc, time::Duration}; use tracing::{debug_span, instrument, trace}; @@ -302,70 +300,65 @@ pub(crate) struct ExecutionCache { /// Cache for contract bytecode, keyed by code hash. code_cache: Cache>, - /// Flattened storage cache: composite key of (`Address`, `StorageKey`) maps directly to - /// values. - storage_cache: Cache<(Address, StorageKey), Option>, + /// Per-account storage cache: outer cache keyed by Address, inner cache tracks that account’s + /// storage slots. + storage_cache: Cache, /// Cache for basic account information (nonce, balance, code hash). account_cache: Cache>, } impl ExecutionCache { - /// Get storage value from flattened cache. + /// Get storage value from hierarchical cache. 
/// /// Returns a `SlotStatus` indicating whether: - /// - `NotCached`: The storage slot is not in the cache - /// - `Empty`: The slot exists in the cache but is empty + /// - `NotCached`: The account's storage cache doesn't exist + /// - `Empty`: The slot exists in the account's cache but is empty /// - `Value`: The slot exists and has a specific value pub(crate) fn get_storage(&self, address: &Address, key: &StorageKey) -> SlotStatus { - match self.storage_cache.get(&(*address, *key)) { + match self.storage_cache.get(address) { None => SlotStatus::NotCached, - Some(None) => SlotStatus::Empty, - Some(Some(value)) => SlotStatus::Value(value), + Some(account_cache) => account_cache.get_storage(key), } } - /// Insert storage value into flattened cache + /// Insert storage value into hierarchical cache pub(crate) fn insert_storage( &self, address: Address, key: StorageKey, value: Option, ) { - self.storage_cache.insert((address, key), value); + self.insert_storage_bulk(address, [(key, value)]); } - /// Insert multiple storage values into flattened cache for a single account + /// Insert multiple storage values into hierarchical cache for a single account /// - /// This method inserts multiple storage values for the same address directly - /// into the flattened cache. + /// This method is optimized for inserting multiple storage values for the same address + /// by doing the account cache lookup only once instead of for each key-value pair. 
pub(crate) fn insert_storage_bulk(&self, address: Address, storage_entries: I) where I: IntoIterator)>, { + let account_cache = self.storage_cache.get(&address).unwrap_or_else(|| { + let account_cache = AccountStorageCache::default(); + self.storage_cache.insert(address, account_cache.clone()); + account_cache + }); + for (key, value) in storage_entries { - self.storage_cache.insert((address, key), value); + account_cache.insert_storage(key, value); } } + /// Invalidate storage for specific account + pub(crate) fn invalidate_account_storage(&self, address: &Address) { + self.storage_cache.invalidate(address); + } + /// Returns the total number of storage slots cached across all accounts pub(crate) fn total_storage_slots(&self) -> usize { - self.storage_cache.entry_count() as usize - } - - /// Invalidates the storage for all addresses in the set - #[instrument(level = "debug", target = "engine::caching", skip_all, fields(accounts = addresses.len()))] - pub(crate) fn invalidate_storages(&self, addresses: HashSet<&Address>) { - // NOTE: this must collect because the invalidate function should not be called while we - // hold an iter for it - let storage_entries = self - .storage_cache - .iter() - .filter_map(|entry| addresses.contains(&entry.key().0).then_some(*entry.key())) - .collect::>(); - for key in storage_entries { - self.storage_cache.invalidate(&key) - } + self.storage_cache.iter().map(|addr| addr.len()).sum() } /// Inserts the post-execution state changes into the cache. @@ -405,7 +398,6 @@ impl ExecutionCache { state_updates.state.values().map(|account| account.storage.len()).sum::() ) .entered(); - let mut invalidated_accounts = HashSet::default(); for (addr, account) in &state_updates.state { // If the account was not modified, as in not changed and not destroyed, then we have // nothing to do w.r.t. 
this particular account and can move on @@ -418,7 +410,7 @@ impl ExecutionCache { // Invalidate the account cache entry if destroyed self.account_cache.invalidate(addr); - invalidated_accounts.insert(addr); + self.invalidate_account_storage(addr); continue } @@ -445,9 +437,6 @@ impl ExecutionCache { self.account_cache.insert(*addr, Some(Account::from(account_info))); } - // invalidate storage for all destroyed accounts - self.invalidate_storages(invalidated_accounts); - Ok(()) } } @@ -476,11 +465,11 @@ impl ExecutionCacheBuilder { const TIME_TO_IDLE: Duration = Duration::from_secs(3600); // 1 hour let storage_cache = CacheBuilder::new(self.storage_cache_entries) - .weigher(|_key: &(Address, StorageKey), _value: &Option| -> u32 { - // Size of composite key (Address + StorageKey) + Option - // Address: 20 bytes, StorageKey: 32 bytes, Option: 33 bytes - // Plus some overhead for the hash map entry - 120_u32 + .weigher(|_key: &Address, value: &AccountStorageCache| -> u32 { + // values based on results from measure_storage_cache_overhead test + let base_weight = 39_000; + let slots_weight = value.len() * 218; + (base_weight + slots_weight) as u32 }) .max_capacity(storage_cache_size) .time_to_live(EXPIRY_TIME) @@ -603,6 +592,56 @@ impl SavedCache { } } +/// Cache for an individual account's storage slots. +/// +/// This represents the second level of the hierarchical storage cache. +/// Each account gets its own `AccountStorageCache` to store accessed storage slots. +#[derive(Debug, Clone)] +pub(crate) struct AccountStorageCache { + /// Map of storage keys to their cached values. + slots: Cache>, +} + +impl AccountStorageCache { + /// Create a new [`AccountStorageCache`] + pub(crate) fn new(max_slots: u64) -> Self { + Self { + slots: CacheBuilder::new(max_slots).build_with_hasher(DefaultHashBuilder::default()), + } + } + + /// Get a storage value from this account's cache. 
+ /// - `NotCached`: The slot is not in the cache + /// - `Empty`: The slot is empty + /// - `Value`: The slot has a specific value + pub(crate) fn get_storage(&self, key: &StorageKey) -> SlotStatus { + match self.slots.get(key) { + None => SlotStatus::NotCached, + Some(None) => SlotStatus::Empty, + Some(Some(value)) => SlotStatus::Value(value), + } + } + + /// Insert a storage value + pub(crate) fn insert_storage(&self, key: StorageKey, value: Option) { + self.slots.insert(key, value); + } + + /// Returns the number of slots in the cache + pub(crate) fn len(&self) -> usize { + self.slots.entry_count() as usize + } +} + +impl Default for AccountStorageCache { + fn default() -> Self { + // With weigher and max_capacity in place, this number represents + // the maximum number of entries that can be stored, not the actual + // memory usage which is controlled by storage cache's max_capacity. + Self::new(1_000_000) + } +} + #[cfg(test)] mod tests { use super::*; @@ -677,36 +716,32 @@ mod tests { #[test] fn measure_storage_cache_overhead() { - let (base_overhead, cache) = - measure_allocation(|| ExecutionCacheBuilder::default().build_caches(1000)); - println!("Base ExecutionCache overhead: {base_overhead} bytes"); + let (base_overhead, cache) = measure_allocation(|| AccountStorageCache::new(1000)); + println!("Base AccountStorageCache overhead: {base_overhead} bytes"); let mut rng = rand::rng(); - let address = Address::random(); let key = StorageKey::random(); let value = StorageValue::from(rng.random::()); let (first_slot, _) = measure_allocation(|| { - cache.insert_storage(address, key, Some(value)); + cache.insert_storage(key, Some(value)); }); println!("First slot insertion overhead: {first_slot} bytes"); const TOTAL_SLOTS: usize = 10_000; let (test_slots, _) = measure_allocation(|| { for _ in 0..TOTAL_SLOTS { - let addr = Address::random(); let key = StorageKey::random(); let value = StorageValue::from(rng.random::()); - cache.insert_storage(addr, key, 
Some(value)); + cache.insert_storage(key, Some(value)); } }); println!("Average overhead over {} slots: {} bytes", TOTAL_SLOTS, test_slots / TOTAL_SLOTS); println!("\nTheoretical sizes:"); - println!("Address size: {} bytes", size_of::
()); println!("StorageKey size: {} bytes", size_of::()); println!("StorageValue size: {} bytes", size_of::()); println!("Option size: {} bytes", size_of::>()); - println!("(Address, StorageKey) size: {} bytes", size_of::<(Address, StorageKey)>()); + println!("Option size: {} bytes", size_of::>()); } #[test] From 4f6cc7a359ba4f7039e4be5dafde727dc6854cdb Mon Sep 17 00:00:00 2001 From: radik878 Date: Wed, 22 Oct 2025 21:20:25 +0300 Subject: [PATCH 04/23] fix(node): remove unused ConsensusLayerHealthEvent variants (#19238) --- crates/node/events/src/cl.rs | 10 +++------- crates/node/events/src/node.rs | 11 ----------- 2 files changed, 3 insertions(+), 18 deletions(-) diff --git a/crates/node/events/src/cl.rs b/crates/node/events/src/cl.rs index bdced7c97d6..99cdc1c245f 100644 --- a/crates/node/events/src/cl.rs +++ b/crates/node/events/src/cl.rs @@ -61,7 +61,7 @@ impl Stream for ConsensusLayerHealthEvents { )) } - // We never had both FCU and transition config exchange. + // We never received any forkchoice updates. return Poll::Ready(Some(ConsensusLayerHealthEvent::NeverSeen)) } } @@ -71,12 +71,8 @@ impl Stream for ConsensusLayerHealthEvents { /// Execution Layer point of view. #[derive(Clone, Copy, Debug)] pub enum ConsensusLayerHealthEvent { - /// Consensus Layer client was never seen. + /// Consensus Layer client was never seen (no forkchoice updates received). NeverSeen, - /// Consensus Layer client has not been seen for a while. - HasNotBeenSeenForAWhile(Duration), - /// Updates from the Consensus Layer client were never received. - NeverReceivedUpdates, - /// Updates from the Consensus Layer client have not been received for a while. + /// Forkchoice updates from the Consensus Layer client have not been received for a while. 
HaveNotReceivedUpdatesForAWhile(Duration), } diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index 3539eae0316..02c7709819e 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -296,17 +296,6 @@ impl NodeState { "Post-merge network, but never seen beacon client. Please launch one to follow the chain!" ) } - ConsensusLayerHealthEvent::HasNotBeenSeenForAWhile(period) => { - warn!( - ?period, - "Post-merge network, but no beacon client seen for a while. Please launch one to follow the chain!" - ) - } - ConsensusLayerHealthEvent::NeverReceivedUpdates => { - warn!( - "Beacon client online, but never received consensus updates. Please ensure your beacon client is operational to follow the chain!" - ) - } ConsensusLayerHealthEvent::HaveNotReceivedUpdatesForAWhile(period) => { warn!( ?period, From 346ef408a4bb657de7529c7765c7a5fe77780230 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 22 Oct 2025 22:38:53 +0200 Subject: [PATCH 05/23] chore: swap order for canon stream (#19242) --- crates/optimism/rpc/src/eth/transaction.rs | 2 +- crates/rpc/rpc-eth-api/src/helpers/transaction.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index aa7e8ea60bd..37c05815a61 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -95,8 +95,8 @@ where let this = self.clone(); let timeout_duration = self.send_raw_transaction_sync_timeout(); async move { - let hash = EthTransactions::send_raw_transaction(&this, tx).await?; let mut canonical_stream = this.provider().canonical_state_stream(); + let hash = EthTransactions::send_raw_transaction(&this, tx).await?; let flashblock_rx = this.pending_block_rx(); let mut flashblock_stream = flashblock_rx.map(WatchStream::new); diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs 
b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 81909b3f36e..2cbf1aff14e 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -91,8 +91,8 @@ pub trait EthTransactions: LoadTransaction { let this = self.clone(); let timeout_duration = self.send_raw_transaction_sync_timeout(); async move { - let hash = EthTransactions::send_raw_transaction(&this, tx).await?; let mut stream = this.provider().canonical_state_stream(); + let hash = EthTransactions::send_raw_transaction(&this, tx).await?; tokio::time::timeout(timeout_duration, async { while let Some(notification) = stream.next().await { let chain = notification.committed(); From bcef01ce4724070278cc9366d08e8d40decfa335 Mon Sep 17 00:00:00 2001 From: theo <80177219+theochap@users.noreply.github.com> Date: Wed, 22 Oct 2025 19:28:23 -0400 Subject: [PATCH 06/23] feat(jovian): track da footprint block limit. Update basefee calculation (#19048) Co-authored-by: Arsenii Kulikov --- Cargo.lock | 13 +- Cargo.toml | 6 +- crates/optimism/chainspec/src/basefee.rs | 169 +++++++++++++++++++--- crates/optimism/evm/src/build.rs | 8 +- crates/optimism/evm/src/error.rs | 3 + crates/optimism/evm/src/l1.rs | 136 +++++++++++++++-- crates/optimism/payload/Cargo.toml | 1 + crates/optimism/payload/src/builder.rs | 56 +++++-- crates/optimism/rpc/src/eth/receipt.rs | 20 ++- crates/optimism/txpool/src/validator.rs | 4 +- crates/rpc/rpc-convert/src/transaction.rs | 1 - 11 files changed, 358 insertions(+), 59 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6e672b6f684..90aed93b946 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -253,9 +253,9 @@ dependencies = [ [[package]] name = "alloy-evm" -version = "0.22.3" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbb19405755c6f94c9bb856f2b1449767074b7e2002e1ab2be0a79b9b28db322" +checksum = "83ce19ea6140497670b1b7e721f9a9ce88022fe475a5e4e6a68a403499cca209" dependencies = [ 
"alloy-consensus", "alloy-eips", @@ -370,9 +370,9 @@ dependencies = [ [[package]] name = "alloy-op-evm" -version = "0.22.3" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f059cf29d7f15b3e6581ceb6eda06a16d8ed4b55adc02b0677add3fd381db6bb" +checksum = "7d7aeaf6051f53880a65b547c43e3b05ee42f68236b1f43f013abfe4eadc47bb" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6127,9 +6127,9 @@ dependencies = [ [[package]] name = "op-revm" -version = "11.1.2" +version = "11.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1d721c4c196273dd135ea5b823cd573ea8735cd3c5f2c19fcb91ee3af655351" +checksum = "a33ab6a7bbcfffcbf784de78f14593b6389003f5c69653fcffcc163459a37d69" dependencies = [ "auto_impl", "revm", @@ -9435,6 +9435,7 @@ version = "1.8.2" dependencies = [ "alloy-consensus", "alloy-eips", + "alloy-evm", "alloy-primitives", "alloy-rlp", "alloy-rpc-types-debug", diff --git a/Cargo.toml b/Cargo.toml index 08041015646..ae7956ef489 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -478,14 +478,14 @@ revm-inspector = { version = "11.1.0", default-features = false } revm-context = { version = "10.1.0", default-features = false } revm-context-interface = { version = "11.1.0", default-features = false } revm-database-interface = { version = "8.0.1", default-features = false } -op-revm = { version = "11.1.0", default-features = false } +op-revm = { version = "11.2.0", default-features = false } revm-inspectors = "0.31.0" # eth alloy-chains = { version = "0.2.5", default-features = false } alloy-dyn-abi = "1.4.1" alloy-eip2124 = { version = "0.2.0", default-features = false } -alloy-evm = { version = "0.22.0", default-features = false } +alloy-evm = { version = "0.22.4", default-features = false } alloy-primitives = { version = "1.4.1", default-features = false, features = ["map-foldhash"] } alloy-rlp = { version = "0.3.10", default-features = false, features = ["core-net"] } alloy-sol-macro = "1.4.1" @@ 
-523,7 +523,7 @@ alloy-transport-ipc = { version = "1.0.41", default-features = false } alloy-transport-ws = { version = "1.0.41", default-features = false } # op -alloy-op-evm = { version = "0.22.0", default-features = false } +alloy-op-evm = { version = "0.22.4", default-features = false } alloy-op-hardforks = "0.4.0" op-alloy-rpc-types = { version = "0.21.0", default-features = false } op-alloy-rpc-types-engine = { version = "0.21.0", default-features = false } diff --git a/crates/optimism/chainspec/src/basefee.rs b/crates/optimism/chainspec/src/basefee.rs index 0ef712dc04f..394de296f23 100644 --- a/crates/optimism/chainspec/src/basefee.rs +++ b/crates/optimism/chainspec/src/basefee.rs @@ -1,26 +1,13 @@ //! Base fee related utilities for Optimism chains. +use core::cmp::max; + use alloy_consensus::BlockHeader; +use alloy_eips::calc_next_block_base_fee; use op_alloy_consensus::{decode_holocene_extra_data, decode_jovian_extra_data, EIP1559ParamError}; use reth_chainspec::{BaseFeeParams, EthChainSpec}; use reth_optimism_forks::OpHardforks; -fn next_base_fee_params( - chain_spec: impl EthChainSpec + OpHardforks, - parent: &H, - timestamp: u64, - denominator: u32, - elasticity: u32, -) -> u64 { - let base_fee_params = if elasticity == 0 && denominator == 0 { - chain_spec.base_fee_params_at_timestamp(timestamp) - } else { - BaseFeeParams::new(denominator as u128, elasticity as u128) - }; - - parent.next_block_base_fee(base_fee_params).unwrap_or_default() -} - /// Extracts the Holocene 1599 parameters from the encoded extra data from the parent header. /// /// Caution: Caller must ensure that holocene is active in the parent header. 
@@ -36,7 +23,13 @@ where { let (elasticity, denominator) = decode_holocene_extra_data(parent.extra_data())?; - Ok(next_base_fee_params(chain_spec, parent, timestamp, denominator, elasticity)) + let base_fee_params = if elasticity == 0 && denominator == 0 { + chain_spec.base_fee_params_at_timestamp(timestamp) + } else { + BaseFeeParams::new(denominator as u128, elasticity as u128) + }; + + Ok(parent.next_block_base_fee(base_fee_params).unwrap_or_default()) } /// Extracts the Jovian 1599 parameters from the encoded extra data from the parent header. @@ -57,8 +50,22 @@ where { let (elasticity, denominator, min_base_fee) = decode_jovian_extra_data(parent.extra_data())?; - let next_base_fee = - next_base_fee_params(chain_spec, parent, timestamp, denominator, elasticity); + let base_fee_params = if elasticity == 0 && denominator == 0 { + chain_spec.base_fee_params_at_timestamp(timestamp) + } else { + BaseFeeParams::new(denominator as u128, elasticity as u128) + }; + + // Starting from Jovian, we use the maximum of the gas used and the blob gas used to calculate + // the next base fee. 
+ let gas_used = max(parent.gas_used(), parent.blob_gas_used().unwrap_or_default()); + + let next_base_fee = calc_next_block_base_fee( + gas_used, + parent.gas_limit(), + parent.base_fee_per_gas().unwrap_or_default(), + base_fee_params, + ); if next_base_fee < min_base_fee { return Ok(min_base_fee); @@ -66,3 +73,127 @@ where Ok(next_base_fee) } + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use op_alloy_consensus::encode_jovian_extra_data; + use reth_chainspec::{ChainSpec, ForkCondition, Hardfork}; + use reth_optimism_forks::OpHardfork; + + use crate::{OpChainSpec, BASE_SEPOLIA}; + + use super::*; + + const JOVIAN_TIMESTAMP: u64 = 1900000000; + + fn get_chainspec() -> Arc { + let mut base_sepolia_spec = BASE_SEPOLIA.inner.clone(); + base_sepolia_spec + .hardforks + .insert(OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(JOVIAN_TIMESTAMP)); + Arc::new(OpChainSpec { + inner: ChainSpec { + chain: base_sepolia_spec.chain, + genesis: base_sepolia_spec.genesis, + genesis_header: base_sepolia_spec.genesis_header, + ..Default::default() + }, + }) + } + + #[test] + fn test_next_base_fee_jovian_blob_gas_used_greater_than_gas_used() { + let chain_spec = get_chainspec(); + let mut parent = chain_spec.genesis_header().clone(); + let timestamp = JOVIAN_TIMESTAMP; + + const GAS_LIMIT: u64 = 10_000_000_000; + const BLOB_GAS_USED: u64 = 5_000_000_000; + const GAS_USED: u64 = 1_000_000_000; + const MIN_BASE_FEE: u64 = 100_000_000; + + parent.extra_data = + encode_jovian_extra_data([0; 8].into(), BaseFeeParams::base_sepolia(), MIN_BASE_FEE) + .unwrap(); + parent.blob_gas_used = Some(BLOB_GAS_USED); + parent.gas_used = GAS_USED; + parent.gas_limit = GAS_LIMIT; + + let expected_base_fee = calc_next_block_base_fee( + BLOB_GAS_USED, + parent.gas_limit(), + parent.base_fee_per_gas().unwrap_or_default(), + BaseFeeParams::base_sepolia(), + ); + assert_eq!( + expected_base_fee, + compute_jovian_base_fee(chain_spec, &parent, timestamp).unwrap() + ); + assert_ne!( + 
expected_base_fee, + calc_next_block_base_fee( + GAS_USED, + parent.gas_limit(), + parent.base_fee_per_gas().unwrap_or_default(), + BaseFeeParams::base_sepolia(), + ) + ) + } + + #[test] + fn test_next_base_fee_jovian_blob_gas_used_less_than_gas_used() { + let chain_spec = get_chainspec(); + let mut parent = chain_spec.genesis_header().clone(); + let timestamp = JOVIAN_TIMESTAMP; + + const GAS_LIMIT: u64 = 10_000_000_000; + const BLOB_GAS_USED: u64 = 100_000_000; + const GAS_USED: u64 = 1_000_000_000; + const MIN_BASE_FEE: u64 = 100_000_000; + + parent.extra_data = + encode_jovian_extra_data([0; 8].into(), BaseFeeParams::base_sepolia(), MIN_BASE_FEE) + .unwrap(); + parent.blob_gas_used = Some(BLOB_GAS_USED); + parent.gas_used = GAS_USED; + parent.gas_limit = GAS_LIMIT; + + let expected_base_fee = calc_next_block_base_fee( + GAS_USED, + parent.gas_limit(), + parent.base_fee_per_gas().unwrap_or_default(), + BaseFeeParams::base_sepolia(), + ); + assert_eq!( + expected_base_fee, + compute_jovian_base_fee(chain_spec, &parent, timestamp).unwrap() + ); + } + + #[test] + fn test_next_base_fee_jovian_min_base_fee() { + let chain_spec = get_chainspec(); + let mut parent = chain_spec.genesis_header().clone(); + let timestamp = JOVIAN_TIMESTAMP; + + const GAS_LIMIT: u64 = 10_000_000_000; + const BLOB_GAS_USED: u64 = 100_000_000; + const GAS_USED: u64 = 1_000_000_000; + const MIN_BASE_FEE: u64 = 5_000_000_000; + + parent.extra_data = + encode_jovian_extra_data([0; 8].into(), BaseFeeParams::base_sepolia(), MIN_BASE_FEE) + .unwrap(); + parent.blob_gas_used = Some(BLOB_GAS_USED); + parent.gas_used = GAS_USED; + parent.gas_limit = GAS_LIMIT; + + let expected_base_fee = MIN_BASE_FEE; + assert_eq!( + expected_base_fee, + compute_jovian_base_fee(chain_spec, &parent, timestamp).unwrap() + ); + } +} diff --git a/crates/optimism/evm/src/build.rs b/crates/optimism/evm/src/build.rs index edc877a9a5d..b8fab18833c 100644 --- a/crates/optimism/evm/src/build.rs +++ 
b/crates/optimism/evm/src/build.rs @@ -46,7 +46,7 @@ impl OpBlockAssembler { evm_env, execution_ctx: ctx, transactions, - output: BlockExecutionResult { receipts, gas_used, .. }, + output: BlockExecutionResult { receipts, gas_used, blob_gas_used, requests: _ }, bundle_state, state_root, state_provider, @@ -80,7 +80,11 @@ impl OpBlockAssembler { }; let (excess_blob_gas, blob_gas_used) = - if self.chain_spec.is_ecotone_active_at_timestamp(timestamp) { + if self.chain_spec.is_jovian_active_at_timestamp(timestamp) { + // In jovian, we're using the blob gas used field to store the current da + // footprint's value. + (Some(0), Some(*blob_gas_used)) + } else if self.chain_spec.is_ecotone_active_at_timestamp(timestamp) { (Some(0), Some(0)) } else { (None, None) diff --git a/crates/optimism/evm/src/error.rs b/crates/optimism/evm/src/error.rs index 9b694243fac..1a8e76c1490 100644 --- a/crates/optimism/evm/src/error.rs +++ b/crates/optimism/evm/src/error.rs @@ -38,6 +38,9 @@ pub enum L1BlockInfoError { /// Operator fee constant conversion error #[error("could not convert operator fee constant")] OperatorFeeConstantConversion, + /// DA foootprint gas scalar constant conversion error + #[error("could not convert DA footprint gas scalar constant")] + DaFootprintGasScalarConversion, /// Optimism hardforks not active #[error("Optimism hardforks are not active")] HardforksNotActive, diff --git a/crates/optimism/evm/src/l1.rs b/crates/optimism/evm/src/l1.rs index 4165221c987..2afe6e9d3a2 100644 --- a/crates/optimism/evm/src/l1.rs +++ b/crates/optimism/evm/src/l1.rs @@ -2,7 +2,7 @@ use crate::{error::L1BlockInfoError, revm_spec_by_timestamp_after_bedrock, OpBlockExecutionError}; use alloy_consensus::Transaction; -use alloy_primitives::{hex, U256}; +use alloy_primitives::{hex, U16, U256}; use op_revm::L1BlockInfo; use reth_execution_errors::BlockExecutionError; use reth_optimism_forks::OpHardforks; @@ -14,6 +14,10 @@ const L1_BLOCK_ECOTONE_SELECTOR: [u8; 4] = hex!("440a5e20"); /// 
The function selector of the "setL1BlockValuesIsthmus" function in the `L1Block` contract. const L1_BLOCK_ISTHMUS_SELECTOR: [u8; 4] = hex!("098999be"); +/// The function selector of the "setL1BlockValuesJovian" function in the `L1Block` contract. +/// This is the first 4 bytes of `keccak256("setL1BlockValuesJovian()")`. +const L1_BLOCK_JOVIAN_SELECTOR: [u8; 4] = hex!("3db6be2b"); + /// Extracts the [`L1BlockInfo`] from the L2 block. The L1 info transaction is always the first /// transaction in the L2 block. /// @@ -52,11 +56,14 @@ pub fn extract_l1_info_from_tx( /// If the input is shorter than 4 bytes. pub fn parse_l1_info(input: &[u8]) -> Result { // Parse the L1 info transaction into an L1BlockInfo struct, depending on the function selector. - // There are currently 3 variants: + // There are currently 4 variants: + // - Jovian // - Isthmus // - Ecotone // - Bedrock - if input[0..4] == L1_BLOCK_ISTHMUS_SELECTOR { + if input[0..4] == L1_BLOCK_JOVIAN_SELECTOR { + parse_l1_info_tx_jovian(input[4..].as_ref()) + } else if input[0..4] == L1_BLOCK_ISTHMUS_SELECTOR { parse_l1_info_tx_isthmus(input[4..].as_ref()) } else if input[0..4] == L1_BLOCK_ECOTONE_SELECTOR { parse_l1_info_tx_ecotone(input[4..].as_ref()) @@ -88,14 +95,12 @@ pub fn parse_l1_info_tx_bedrock(data: &[u8]) -> Result Result Result Result Result { + if data.len() != 174 { + return Err(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::UnexpectedCalldataLength)); + } + + // https://github.com/ethereum-optimism/op-geth/blob/60038121c7571a59875ff9ed7679c48c9f73405d/core/types/rollup_cost.go#L317-L328 + // + // data layout assumed for Ecotone: + // offset type varname + // 0 + // 4 uint32 _basefeeScalar (start offset in this scope) + // 8 uint32 _blobBaseFeeScalar + // 12 uint64 _sequenceNumber, + // 20 uint64 _timestamp, + // 28 uint64 _l1BlockNumber + // 36 uint256 _basefee, + // 68 uint256 _blobBaseFee, + // 100 bytes32 _hash, + // 132 bytes32 _batcherHash, + // 164 uint32 _operatorFeeScalar + // 168 
uint64 _operatorFeeConstant + // 176 uint16 _daFootprintGasScalar + + let l1_base_fee_scalar = U256::try_from_be_slice(&data[..4]) + .ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BaseFeeScalarConversion))?; + let l1_blob_base_fee_scalar = U256::try_from_be_slice(&data[4..8]).ok_or({ + OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BlobBaseFeeScalarConversion) + })?; + let l1_base_fee = U256::try_from_be_slice(&data[32..64]) + .ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BaseFeeConversion))?; + let l1_blob_base_fee = U256::try_from_be_slice(&data[64..96]) + .ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BlobBaseFeeConversion))?; + let operator_fee_scalar = U256::try_from_be_slice(&data[160..164]).ok_or({ + OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::OperatorFeeScalarConversion) + })?; + let operator_fee_constant = U256::try_from_be_slice(&data[164..172]).ok_or({ + OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::OperatorFeeConstantConversion) + })?; + let da_footprint_gas_scalar: u16 = U16::try_from_be_slice(&data[172..174]) + .ok_or({ + OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::DaFootprintGasScalarConversion) + })? 
+ .to(); - Ok(l1block) + Ok(L1BlockInfo { + l1_base_fee, + l1_base_fee_scalar, + l1_blob_base_fee: Some(l1_blob_base_fee), + l1_blob_base_fee_scalar: Some(l1_blob_base_fee_scalar), + operator_fee_scalar: Some(operator_fee_scalar), + operator_fee_constant: Some(operator_fee_constant), + da_footprint_gas_scalar: Some(da_footprint_gas_scalar), + ..Default::default() + }) } /// An extension trait for [`L1BlockInfo`] that allows us to calculate the L1 cost of a transaction @@ -282,6 +354,7 @@ mod tests { use super::*; use alloy_consensus::{Block, BlockBody}; use alloy_eips::eip2718::Decodable2718; + use alloy_primitives::keccak256; use reth_optimism_chainspec::OP_MAINNET; use reth_optimism_forks::OpHardforks; use reth_optimism_primitives::OpTransactionSigned; @@ -308,6 +381,12 @@ mod tests { assert_eq!(l1_info.l1_blob_base_fee_scalar, None); } + #[test] + fn test_verify_set_jovian() { + let hash = &keccak256("setL1BlockValuesJovian()")[..4]; + assert_eq!(hash, L1_BLOCK_JOVIAN_SELECTOR) + } + #[test] fn sanity_l1_block_ecotone() { // rig @@ -408,4 +487,33 @@ mod tests { assert_eq!(l1_block_info.operator_fee_scalar, operator_fee_scalar); assert_eq!(l1_block_info.operator_fee_constant, operator_fee_constant); } + + #[test] + fn parse_l1_info_jovian() { + // L1 block info from a devnet with Jovian activated + const DATA: &[u8] = &hex!( + "3db6be2b00000558000c5fc500000000000000030000000067a9f765000000000000002900000000000000000000000000000000000000000000000000000000006a6d09000000000000000000000000000000000000000000000000000000000000000172fcc8e8886636bdbe96ba0e4baab67ea7e7811633f52b52e8cf7a5123213b6f000000000000000000000000d3f2c5afb2d76f5579f326b0cd7da5f5a4126c3500004e2000000000000001f4dead" + );

 + // expected l1 block info verified against expected l1 fee and operator fee for tx. 
+ let l1_base_fee = U256::from(6974729); + let l1_base_fee_scalar = U256::from(1368); + let l1_blob_base_fee = Some(U256::from(1)); + let l1_blob_base_fee_scalar = Some(U256::from(810949)); + let operator_fee_scalar = Some(U256::from(20000)); + let operator_fee_constant = Some(U256::from(500)); + let da_footprint_gas_scalar: Option = Some(U16::from(0xdead).to()); + + // test + + let l1_block_info = parse_l1_info(DATA).unwrap(); + + assert_eq!(l1_block_info.l1_base_fee, l1_base_fee); + assert_eq!(l1_block_info.l1_base_fee_scalar, l1_base_fee_scalar); + assert_eq!(l1_block_info.l1_blob_base_fee, l1_blob_base_fee); + assert_eq!(l1_block_info.l1_blob_base_fee_scalar, l1_blob_base_fee_scalar); + assert_eq!(l1_block_info.operator_fee_scalar, operator_fee_scalar); + assert_eq!(l1_block_info.operator_fee_constant, operator_fee_constant); + assert_eq!(l1_block_info.da_footprint_gas_scalar, da_footprint_gas_scalar); + } } diff --git a/crates/optimism/payload/Cargo.toml b/crates/optimism/payload/Cargo.toml index 8d1875fe753..e75075a12cf 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -44,6 +44,7 @@ op-alloy-consensus.workspace = true alloy-rpc-types-engine.workspace = true alloy-rpc-types-debug.workspace = true alloy-consensus.workspace = true +alloy-evm.workspace = true # misc derive_more.workspace = true diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 67b8faf5608..05f33d3b699 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -1,5 +1,4 @@ //! Optimism payload builder implementation. 
- use crate::{ config::{OpBuilderConfig, OpDAConfig}, error::OpPayloadBuilderError, @@ -7,6 +6,7 @@ use crate::{ OpAttributes, OpPayloadBuilderAttributes, OpPayloadPrimitives, }; use alloy_consensus::{BlockHeader, Transaction, Typed2718}; +use alloy_evm::Evm as AlloyEvm; use alloy_primitives::{B256, U256}; use alloy_rpc_types_debug::ExecutionWitness; use alloy_rpc_types_engine::PayloadId; @@ -14,10 +14,12 @@ use reth_basic_payload_builder::*; use reth_chain_state::ExecutedBlock; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_evm::{ + block::BlockExecutorFor, execute::{ BlockBuilder, BlockBuilderOutcome, BlockExecutionError, BlockExecutor, BlockValidationError, }, - ConfigureEvm, Database, Evm, + op_revm::{constants::L1_BLOCK_CONTRACT, L1BlockInfo}, + ConfigureEvm, Database, }; use reth_execution_types::ExecutionOutcome; use reth_optimism_forks::OpHardforks; @@ -340,6 +342,11 @@ impl OpBuilder<'_, Txs> { let mut db = State::builder().with_database(db).with_bundle_update().build(); + // Load the L1 block contract into the database cache. If the L1 block contract is not + // pre-loaded the database will panic when trying to fetch the DA footprint gas + // scalar. + db.load_cache_account(L1_BLOCK_CONTRACT).map_err(BlockExecutionError::other)?; + let mut builder = ctx.block_builder(&mut db)?; // 1. 
apply pre-execution changes @@ -509,17 +516,27 @@ impl ExecutionInfo { tx_data_limit: Option, block_data_limit: Option, tx_gas_limit: u64, + da_footprint_gas_scalar: Option, ) -> bool { if tx_data_limit.is_some_and(|da_limit| tx_da_size > da_limit) { return true; } - if block_data_limit - .is_some_and(|da_limit| self.cumulative_da_bytes_used + tx_da_size > da_limit) - { + let total_da_bytes_used = self.cumulative_da_bytes_used.saturating_add(tx_da_size); + + if block_data_limit.is_some_and(|da_limit| total_da_bytes_used > da_limit) { return true; } + // Post Jovian: the tx DA footprint must be less than the block gas limit + if let Some(da_footprint_gas_scalar) = da_footprint_gas_scalar { + let tx_da_footprint = + total_da_bytes_used.saturating_mul(da_footprint_gas_scalar as u64); + if tx_da_footprint > block_gas_limit { + return true; + } + } + self.cumulative_gas_used + tx_gas_limit > block_gas_limit } } @@ -586,7 +603,13 @@ where pub fn block_builder<'a, DB: Database>( &'a self, db: &'a mut State, - ) -> Result + 'a, PayloadBuilderError> { + ) -> Result< + impl BlockBuilder< + Primitives = Evm::Primitives, + Executor: BlockExecutorFor<'a, Evm::BlockExecutorFactory, DB>, + > + 'a, + PayloadBuilderError, + > { self.evm_config .builder_for_next_block( db, @@ -649,14 +672,18 @@ where /// Executes the given best transactions and updates the execution info. /// /// Returns `Ok(Some(())` if the job was cancelled. 
- pub fn execute_best_transactions( + pub fn execute_best_transactions( &self, info: &mut ExecutionInfo, - builder: &mut impl BlockBuilder, + builder: &mut Builder, mut best_txs: impl PayloadTransactions< Transaction: PoolTransaction> + OpPooledTx, >, - ) -> Result, PayloadBuilderError> { + ) -> Result, PayloadBuilderError> + where + Builder: BlockBuilder, + <::Evm as AlloyEvm>::DB: Database, + { let block_gas_limit = builder.evm_mut().block().gas_limit(); let block_da_limit = self.da_config.max_da_block_size(); let tx_da_limit = self.da_config.max_da_tx_size(); @@ -666,12 +693,23 @@ where let interop = tx.interop_deadline(); let tx_da_size = tx.estimated_da_size(); let tx = tx.into_consensus(); + + let da_footprint_gas_scalar = self + .chain_spec + .is_jovian_active_at_timestamp(self.attributes().timestamp()) + .then_some( + L1BlockInfo::fetch_da_footprint_gas_scalar(builder.evm_mut().db_mut()).expect( + "DA footprint should always be available from the database post jovian", + ), + ); + if info.is_tx_over_limits( tx_da_size, block_gas_limit, tx_da_limit, block_da_limit, tx.gas_limit(), + da_footprint_gas_scalar, ) { // we can't fit this transaction into the block, so we need to mark it as // invalid which also removes all dependent transaction from diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index f8910c22a33..5d1e8e29794 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -131,10 +131,14 @@ pub struct OpReceiptFieldsBuilder { pub l1_blob_base_fee: Option, /// The current L1 blob base fee scalar. pub l1_blob_base_fee_scalar: Option, + /* ---------------------------------------- Isthmus ---------------------------------------- */ /// The current operator fee scalar. pub operator_fee_scalar: Option, /// The current L1 blob base fee scalar. 
pub operator_fee_constant: Option, + /* ---------------------------------------- Jovian ----------------------------------------- */ + /// The current DA footprint gas scalar. + pub da_footprint_gas_scalar: Option, } impl OpReceiptFieldsBuilder { @@ -154,6 +158,7 @@ impl OpReceiptFieldsBuilder { l1_blob_base_fee_scalar: None, operator_fee_scalar: None, operator_fee_constant: None, + da_footprint_gas_scalar: None, } } @@ -205,6 +210,8 @@ impl OpReceiptFieldsBuilder { l1_block_info.operator_fee_constant.map(|constant| constant.saturating_to()); } + self.da_footprint_gas_scalar = l1_block_info.da_footprint_gas_scalar; + Ok(self) } @@ -236,6 +243,7 @@ impl OpReceiptFieldsBuilder { l1_blob_base_fee_scalar, operator_fee_scalar, operator_fee_constant, + da_footprint_gas_scalar, } = self; OpTransactionReceiptFields { @@ -249,7 +257,7 @@ impl OpReceiptFieldsBuilder { l1_blob_base_fee_scalar, operator_fee_scalar, operator_fee_constant, - da_footprint_gas_scalar: None, + da_footprint_gas_scalar, }, deposit_nonce, deposit_receipt_version, @@ -409,7 +417,7 @@ mod test { l1_blob_base_fee_scalar, operator_fee_scalar, operator_fee_constant, - .. + da_footprint_gas_scalar, } = receipt_meta.l1_block_info; assert_eq!( @@ -453,6 +461,11 @@ mod test { TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.operator_fee_constant, "incorrect operator fee constant" ); + assert_eq!( + da_footprint_gas_scalar, + TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.da_footprint_gas_scalar, + "incorrect da footprint gas scalar" + ); } #[test] @@ -540,7 +553,7 @@ mod test { l1_blob_base_fee_scalar, operator_fee_scalar, operator_fee_constant, - .. 
+ da_footprint_gas_scalar, } = receipt_meta.l1_block_info; assert_eq!(l1_gas_price, Some(14121491676), "incorrect l1 base fee (former gas price)"); @@ -552,5 +565,6 @@ mod test { assert_eq!(l1_blob_base_fee_scalar, Some(1055762), "incorrect l1 blob base fee scalar"); assert_eq!(operator_fee_scalar, None, "incorrect operator fee scalar"); assert_eq!(operator_fee_constant, None, "incorrect operator fee constant"); + assert_eq!(da_footprint_gas_scalar, None, "incorrect da footprint gas scalar"); } } diff --git a/crates/optimism/txpool/src/validator.rs b/crates/optimism/txpool/src/validator.rs index 631c4255942..0cec4482a32 100644 --- a/crates/optimism/txpool/src/validator.rs +++ b/crates/optimism/txpool/src/validator.rs @@ -143,8 +143,8 @@ where self.block_info.timestamp.store(header.timestamp(), Ordering::Relaxed); self.block_info.number.store(header.number(), Ordering::Relaxed); - if let Some(Ok(cost_addition)) = tx.map(reth_optimism_evm::extract_l1_info_from_tx) { - *self.block_info.l1_block_info.write() = cost_addition; + if let Some(Ok(l1_block_info)) = tx.map(reth_optimism_evm::extract_l1_info_from_tx) { + *self.block_info.l1_block_info.write() = l1_block_info; } if self.chain_spec().is_interop_active_at_timestamp(header.timestamp()) { diff --git a/crates/rpc/rpc-convert/src/transaction.rs b/crates/rpc/rpc-convert/src/transaction.rs index 046acbda544..6766ec43fb0 100644 --- a/crates/rpc/rpc-convert/src/transaction.rs +++ b/crates/rpc/rpc-convert/src/transaction.rs @@ -1,5 +1,4 @@ //! Compatibility functions for rpc `Transaction` type. 
- use crate::{ fees::{CallFees, CallFeesError}, RpcHeader, RpcReceipt, RpcTransaction, RpcTxReq, RpcTypes, SignableTxRequest, From f8845c6fbb8e0fe23e5f69f9514dc2b9415558cb Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Thu, 23 Oct 2025 05:36:16 +0100 Subject: [PATCH 07/23] fix(engine): payload processor tracing event targets (#19223) --- .../src/tree/payload_processor/multiproof.rs | 48 +++++++++---------- .../src/tree/payload_processor/prewarm.rs | 16 +++---- 2 files changed, 32 insertions(+), 32 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index 1e5b226f591..737f57fb345 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -218,7 +218,7 @@ pub(crate) fn evm_state_to_hashed_post_state(update: EvmState) -> HashedPostStat for (address, account) in update { if account.is_touched() { let hashed_address = keccak256(address); - trace!(target: "engine::root", ?address, ?hashed_address, "Adding account to state update"); + trace!(target: "engine::tree::payload_processor::multiproof", ?address, ?hashed_address, "Adding account to state update"); let destroyed = account.is_selfdestructed(); let info = if destroyed { None } else { Some(account.info.into()) }; @@ -456,7 +456,7 @@ impl MultiproofManager { let storage_targets = proof_targets.len(); trace!( - target: "engine::root", + target: "engine::tree::payload_processor::multiproof", proof_sequence_number, ?proof_targets, storage_targets, @@ -475,7 +475,7 @@ impl MultiproofManager { .storage_proof(hashed_address, proof_targets); let elapsed = start.elapsed(); trace!( - target: "engine::root", + target: "engine::tree::payload_processor::multiproof", proof_sequence_number, ?elapsed, ?source, @@ -529,7 +529,7 @@ impl MultiproofManager { let storage_targets = 
proof_targets.values().map(|slots| slots.len()).sum::(); trace!( - target: "engine::root", + target: "engine::tree::payload_processor::multiproof", proof_sequence_number, ?proof_targets, account_targets, @@ -567,7 +567,7 @@ impl MultiproofManager { })(); let elapsed = start.elapsed(); trace!( - target: "engine::root", + target: "engine::tree::payload_processor::multiproof", proof_sequence_number, ?elapsed, ?source, @@ -781,7 +781,7 @@ impl MultiProofTask { proofs_processed >= state_update_proofs_requested + prefetch_proofs_requested; let no_pending = !self.proof_sequencer.has_pending(); trace!( - target: "engine::root", + target: "engine::tree::payload_processor::multiproof", proofs_processed, state_update_proofs_requested, prefetch_proofs_requested, @@ -836,7 +836,7 @@ impl MultiProofTask { } if duplicates > 0 { - trace!(target: "engine::root", duplicates, "Removed duplicate prefetch proof targets"); + trace!(target: "engine::tree::payload_processor::multiproof", duplicates, "Removed duplicate prefetch proof targets"); } targets @@ -998,18 +998,18 @@ impl MultiProofTask { let mut updates_finished_time = None; loop { - trace!(target: "engine::root", "entering main channel receiving loop"); + trace!(target: "engine::tree::payload_processor::multiproof", "entering main channel receiving loop"); match self.rx.recv() { Ok(message) => match message { MultiProofMessage::PrefetchProofs(targets) => { - trace!(target: "engine::root", "processing MultiProofMessage::PrefetchProofs"); + trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::PrefetchProofs"); if first_update_time.is_none() { // record the wait time self.metrics .first_update_wait_time_histogram .record(start.elapsed().as_secs_f64()); first_update_time = Some(Instant::now()); - debug!(target: "engine::root", "Started state root calculation"); + debug!(target: "engine::tree::payload_processor::multiproof", "Started state root calculation"); } let account_targets = 
targets.len(); @@ -1017,7 +1017,7 @@ impl MultiProofTask { targets.values().map(|slots| slots.len()).sum::(); prefetch_proofs_requested += self.on_prefetch_proof(targets); trace!( - target: "engine::root", + target: "engine::tree::payload_processor::multiproof", account_targets, storage_targets, prefetch_proofs_requested, @@ -1025,20 +1025,20 @@ impl MultiProofTask { ); } MultiProofMessage::StateUpdate(source, update) => { - trace!(target: "engine::root", "processing MultiProofMessage::StateUpdate"); + trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::StateUpdate"); if first_update_time.is_none() { // record the wait time self.metrics .first_update_wait_time_histogram .record(start.elapsed().as_secs_f64()); first_update_time = Some(Instant::now()); - debug!(target: "engine::root", "Started state root calculation"); + debug!(target: "engine::tree::payload_processor::multiproof", "Started state root calculation"); } let len = update.len(); state_update_proofs_requested += self.on_state_update(source, update); trace!( - target: "engine::root", + target: "engine::tree::payload_processor::multiproof", ?source, len, ?state_update_proofs_requested, @@ -1046,7 +1046,7 @@ impl MultiProofTask { ); } MultiProofMessage::FinishedStateUpdates => { - trace!(target: "engine::root", "processing MultiProofMessage::FinishedStateUpdates"); + trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::FinishedStateUpdates"); updates_finished = true; updates_finished_time = Some(Instant::now()); if self.is_done( @@ -1056,14 +1056,14 @@ impl MultiProofTask { updates_finished, ) { debug!( - target: "engine::root", + target: "engine::tree::payload_processor::multiproof", "State updates finished and all proofs processed, ending calculation" ); break } } MultiProofMessage::EmptyProof { sequence_number, state } => { - trace!(target: "engine::root", "processing MultiProofMessage::EmptyProof"); + trace!(target: 
"engine::tree::payload_processor::multiproof", "processing MultiProofMessage::EmptyProof"); proofs_processed += 1; @@ -1081,14 +1081,14 @@ impl MultiProofTask { updates_finished, ) { debug!( - target: "engine::root", + target: "engine::tree::payload_processor::multiproof", "State updates finished and all proofs processed, ending calculation" ); break } } MultiProofMessage::ProofCalculated(proof_calculated) => { - trace!(target: "engine::root", "processing + trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::ProofCalculated"); // we increment proofs_processed for both state updates and prefetches, @@ -1100,7 +1100,7 @@ impl MultiProofTask { .record(proof_calculated.elapsed); trace!( - target: "engine::root", + target: "engine::tree::payload_processor::multiproof", sequence = proof_calculated.sequence_number, total_proofs = proofs_processed, "Processing calculated proof" @@ -1121,14 +1121,14 @@ impl MultiProofTask { updates_finished, ) { debug!( - target: "engine::root", + target: "engine::tree::payload_processor::multiproof", "State updates finished and all proofs processed, ending calculation"); break } } MultiProofMessage::ProofCalculationError(err) => { error!( - target: "engine::root", + target: "engine::tree::payload_processor::multiproof", ?err, "proof calculation error" ); @@ -1138,14 +1138,14 @@ impl MultiProofTask { Err(_) => { // this means our internal message channel is closed, which shouldn't happen // in normal operation since we hold both ends - error!(target: "engine::root", "Internal message channel closed unexpectedly"); + error!(target: "engine::tree::payload_processor::multiproof", "Internal message channel closed unexpectedly"); return } } } debug!( - target: "engine::root", + target: "engine::tree::payload_processor::multiproof", total_updates = state_update_proofs_requested, total_proofs = proofs_processed, total_time = ?first_update_time.map(|t|t.elapsed()), diff --git 
a/crates/engine/tree/src/tree/payload_processor/prewarm.rs b/crates/engine/tree/src/tree/payload_processor/prewarm.rs index 134233233ee..abc3bd58351 100644 --- a/crates/engine/tree/src/tree/payload_processor/prewarm.rs +++ b/crates/engine/tree/src/tree/payload_processor/prewarm.rs @@ -106,7 +106,7 @@ where let (actions_tx, actions_rx) = channel(); trace!( - target: "engine::tree::prewarm", + target: "engine::tree::payload_processor::prewarm", max_concurrency, transaction_count_hint, "Initialized prewarm task" @@ -185,7 +185,7 @@ where for handle in &handles { if let Err(err) = handle.send(indexed_tx.clone()) { warn!( - target: "engine::tree::prewarm", + target: "engine::tree::payload_processor::prewarm", tx_hash = %first_tx_hash, error = %err, "Failed to send deposit transaction to worker" @@ -196,7 +196,7 @@ where // Not a deposit, send to first worker via round-robin if let Err(err) = handles[0].send(indexed_tx) { warn!( - target: "engine::tree::prewarm", + target: "engine::tree::payload_processor::prewarm", task_idx = 0, error = %err, "Failed to send transaction to worker" @@ -213,7 +213,7 @@ where let task_idx = executing % workers_needed; if let Err(err) = handles[task_idx].send(indexed_tx) { warn!( - target: "engine::tree::prewarm", + target: "engine::tree::payload_processor::prewarm", task_idx, error = %err, "Failed to send transaction to worker" @@ -329,7 +329,7 @@ where self.send_multi_proof_targets(proof_targets); } PrewarmTaskEvent::Terminate { block_output } => { - trace!(target: "engine::tree::prewarm", "Received termination signal"); + trace!(target: "engine::tree::payload_processor::prewarm", "Received termination signal"); final_block_output = Some(block_output); if finished_execution { @@ -338,7 +338,7 @@ where } } PrewarmTaskEvent::FinishedTxExecution { executed_transactions } => { - trace!(target: "engine::tree::prewarm", "Finished prewarm execution signal"); + trace!(target: "engine::tree::payload_processor::prewarm", "Finished prewarm execution 
signal"); self.ctx.metrics.transactions.set(executed_transactions as f64); self.ctx.metrics.transactions_histogram.record(executed_transactions as f64); @@ -352,7 +352,7 @@ where } } - debug!(target: "engine::tree::prewarm", "Completed prewarm execution"); + debug!(target: "engine::tree::payload_processor::prewarm", "Completed prewarm execution"); // save caches and finish if let Some(Some(state)) = final_block_output { @@ -488,7 +488,7 @@ where Ok(res) => res, Err(err) => { trace!( - target: "engine::tree::prewarm", + target: "engine::tree::payload_processor::prewarm", %err, tx_hash=%tx.tx().tx_hash(), sender=%tx.signer(), From 4548209e7b00351a993779820b78db86b8628c9b Mon Sep 17 00:00:00 2001 From: YK Date: Thu, 23 Oct 2025 15:19:21 +0800 Subject: [PATCH 08/23] perf: rm pending queue from MultiproofManager (#19178) --- .../src/tree/payload_processor/multiproof.rs | 94 +++++++++---------- crates/trie/parallel/src/proof_task.rs | 86 +++++++++++++++-- 2 files changed, 125 insertions(+), 55 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index 737f57fb345..9f136a48125 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -24,7 +24,7 @@ use reth_trie_parallel::{ root::ParallelStateRootError, }; use std::{ - collections::{BTreeMap, VecDeque}, + collections::BTreeMap, ops::DerefMut, sync::{ mpsc::{channel, Receiver, Sender}, @@ -34,10 +34,6 @@ use std::{ }; use tracing::{debug, error, instrument, trace}; -/// Default upper bound for inflight multiproof calculations. These would be sitting in the queue -/// waiting to be processed. -const DEFAULT_MULTIPROOF_INFLIGHT_LIMIT: usize = 128; - /// A trie update that can be applied to sparse trie alongside the proofs for touched parts of the /// state. 
#[derive(Default, Debug)] @@ -337,17 +333,10 @@ impl MultiproofInput { } /// Manages concurrent multiproof calculations. -/// Takes care of not having more calculations in flight than a given maximum -/// concurrency, further calculation requests are queued and spawn later, after -/// availability has been signaled. #[derive(Debug)] pub struct MultiproofManager { - /// Maximum number of proof calculations allowed to be inflight at once. - inflight_limit: usize, /// Currently running calculations. inflight: usize, - /// Queued calculations. - pending: VecDeque, /// Executor for tasks executor: WorkloadExecutor, /// Handle to the proof worker pools (storage and account). @@ -376,22 +365,16 @@ impl MultiproofManager { proof_worker_handle: ProofWorkerHandle, ) -> Self { Self { - pending: VecDeque::with_capacity(DEFAULT_MULTIPROOF_INFLIGHT_LIMIT), - inflight_limit: DEFAULT_MULTIPROOF_INFLIGHT_LIMIT, - executor, inflight: 0, + executor, metrics, proof_worker_handle, missed_leaves_storage_roots: Default::default(), } } - const fn is_full(&self) -> bool { - self.inflight >= self.inflight_limit - } - - /// Spawns a new multiproof calculation or enqueues it if the inflight limit is reached. - fn spawn_or_queue(&mut self, input: PendingMultiproofTask) { + /// Spawns a new multiproof calculation. + fn spawn(&mut self, input: PendingMultiproofTask) { // If there are no proof targets, we can just send an empty multiproof back immediately if input.proof_targets_is_empty() { debug!( @@ -402,27 +385,9 @@ impl MultiproofManager { return } - if self.is_full() { - self.pending.push_back(input); - self.metrics.pending_multiproofs_histogram.record(self.pending.len() as f64); - return; - } - self.spawn_multiproof_task(input); } - /// Signals that a multiproof calculation has finished and there's room to - /// spawn a new calculation if needed. 
- fn on_calculation_complete(&mut self) { - self.inflight = self.inflight.saturating_sub(1); - self.metrics.inflight_multiproofs_histogram.record(self.inflight as f64); - - if let Some(input) = self.pending.pop_front() { - self.metrics.pending_multiproofs_histogram.record(self.pending.len() as f64); - self.spawn_multiproof_task(input); - } - } - /// Spawns a multiproof task, dispatching to `spawn_storage_proof` if the input is a storage /// multiproof, and dispatching to `spawn_multiproof` otherwise. fn spawn_multiproof_task(&mut self, input: PendingMultiproofTask) { @@ -508,6 +473,24 @@ impl MultiproofManager { self.inflight += 1; self.metrics.inflight_multiproofs_histogram.record(self.inflight as f64); + self.metrics + .pending_storage_multiproofs_histogram + .record(self.proof_worker_handle.pending_storage_tasks() as f64); + self.metrics + .pending_account_multiproofs_histogram + .record(self.proof_worker_handle.pending_account_tasks() as f64); + } + + /// Signals that a multiproof calculation has finished. + fn on_calculation_complete(&mut self) { + self.inflight = self.inflight.saturating_sub(1); + self.metrics.inflight_multiproofs_histogram.record(self.inflight as f64); + self.metrics + .pending_storage_multiproofs_histogram + .record(self.proof_worker_handle.pending_storage_tasks() as f64); + self.metrics + .pending_account_multiproofs_histogram + .record(self.proof_worker_handle.pending_account_tasks() as f64); } /// Spawns a single multiproof calculation task. 
@@ -598,6 +581,12 @@ impl MultiproofManager { self.inflight += 1; self.metrics.inflight_multiproofs_histogram.record(self.inflight as f64); + self.metrics + .pending_storage_multiproofs_histogram + .record(self.proof_worker_handle.pending_storage_tasks() as f64); + self.metrics + .pending_account_multiproofs_histogram + .record(self.proof_worker_handle.pending_account_tasks() as f64); } } @@ -606,8 +595,10 @@ impl MultiproofManager { pub(crate) struct MultiProofTaskMetrics { /// Histogram of inflight multiproofs. pub inflight_multiproofs_histogram: Histogram, - /// Histogram of pending multiproofs. - pub pending_multiproofs_histogram: Histogram, + /// Histogram of pending storage multiproofs in the queue. + pub pending_storage_multiproofs_histogram: Histogram, + /// Histogram of pending account multiproofs in the queue. + pub pending_account_multiproofs_histogram: Histogram, /// Histogram of the number of prefetch proof target accounts. pub prefetch_proof_targets_accounts_histogram: Histogram, @@ -657,8 +648,7 @@ pub(crate) struct MultiProofTaskMetrics { #[derive(Debug)] pub(super) struct MultiProofTask { /// The size of proof targets chunk to spawn in one calculation. - /// - /// If [`None`], then chunking is disabled. + /// If None, chunking is disabled and all targets are processed in a single proof. chunk_size: Option, /// Task configuration. config: MultiProofConfig, @@ -738,10 +728,14 @@ impl MultiProofTask { // Process proof targets in chunks. let mut chunks = 0; - let should_chunk = !self.multiproof_manager.is_full(); + + // Only chunk if account or storage workers are available to take advantage of parallelism. 
+ let should_chunk = + self.multiproof_manager.proof_worker_handle.has_available_account_workers() || + self.multiproof_manager.proof_worker_handle.has_available_storage_workers(); let mut spawn = |proof_targets| { - self.multiproof_manager.spawn_or_queue( + self.multiproof_manager.spawn( MultiproofInput { config: self.config.clone(), source: None, @@ -873,10 +867,14 @@ impl MultiProofTask { // Process state updates in chunks. let mut chunks = 0; - let should_chunk = !self.multiproof_manager.is_full(); let mut spawned_proof_targets = MultiProofTargets::default(); + // Only chunk if account or storage workers are available to take advantage of parallelism. + let should_chunk = + self.multiproof_manager.proof_worker_handle.has_available_account_workers() || + self.multiproof_manager.proof_worker_handle.has_available_storage_workers(); + let mut spawn = |hashed_state_update| { let proof_targets = get_proof_targets( &hashed_state_update, @@ -885,7 +883,7 @@ impl MultiProofTask { ); spawned_proof_targets.extend_ref(&proof_targets); - self.multiproof_manager.spawn_or_queue( + self.multiproof_manager.spawn( MultiproofInput { config: self.config.clone(), source: Some(source), @@ -954,7 +952,7 @@ impl MultiProofTask { /// so that the proofs for accounts and storage slots that were already fetched are not /// requested again. /// 2. Using the proof targets, a new multiproof is calculated using - /// [`MultiproofManager::spawn_or_queue`]. + /// [`MultiproofManager::spawn`]. /// * If the list of proof targets is empty, the [`MultiProofMessage::EmptyProof`] message is /// sent back to this task along with the original state update. 
/// * Otherwise, the multiproof is calculated and the [`MultiProofMessage::ProofCalculated`] diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index 6525500a2a2..18e93dc26a4 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -51,6 +51,7 @@ use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; use reth_trie_sparse::provider::{RevealedNode, TrieNodeProvider, TrieNodeProviderFactory}; use std::{ sync::{ + atomic::{AtomicUsize, Ordering}, mpsc::{channel, Receiver, Sender}, Arc, }, @@ -116,6 +117,7 @@ fn storage_worker_loop( task_ctx: ProofTaskCtx, work_rx: CrossbeamReceiver, worker_id: usize, + available_workers: Arc, #[cfg(feature = "metrics")] metrics: ProofTaskTrieMetrics, ) where Factory: DatabaseProviderFactory, @@ -144,7 +146,13 @@ fn storage_worker_loop( let mut storage_proofs_processed = 0u64; let mut storage_nodes_processed = 0u64; + // Initially mark this worker as available. + available_workers.fetch_add(1, Ordering::Relaxed); + while let Ok(job) = work_rx.recv() { + // Mark worker as busy. + available_workers.fetch_sub(1, Ordering::Relaxed); + match job { StorageWorkerJob::StorageProof { input, result_sender } => { let hashed_address = input.hashed_address; @@ -186,6 +194,9 @@ fn storage_worker_loop( total_processed = storage_proofs_processed, "Storage proof completed" ); + + // Mark worker as available again. + available_workers.fetch_add(1, Ordering::Relaxed); } StorageWorkerJob::BlindedStorageNode { account, path, result_sender } => { @@ -224,6 +235,9 @@ fn storage_worker_loop( total_processed = storage_nodes_processed, "Blinded storage node completed" ); + + // Mark worker as available again. + available_workers.fetch_add(1, Ordering::Relaxed); } } } @@ -244,11 +258,9 @@ fn storage_worker_loop( /// /// # Lifecycle /// -/// Each worker: -/// 1. Receives `AccountWorkerJob` from crossbeam unbounded channel -/// 2. 
Computes result using its dedicated long-lived transaction -/// 3. Sends result directly to original caller via `std::mpsc` -/// 4. Repeats until channel closes (graceful shutdown) +/// Each worker initializes its providers, advertises availability, then loops: +/// receive an account job, mark busy, process the work, respond, and mark available again. +/// The loop ends gracefully once the channel closes. /// /// # Transaction Reuse /// @@ -269,6 +281,7 @@ fn account_worker_loop( work_rx: CrossbeamReceiver, storage_work_tx: CrossbeamSender, worker_id: usize, + available_workers: Arc, #[cfg(feature = "metrics")] metrics: ProofTaskTrieMetrics, ) where Factory: DatabaseProviderFactory, @@ -297,7 +310,13 @@ fn account_worker_loop( let mut account_proofs_processed = 0u64; let mut account_nodes_processed = 0u64; + // Count this worker as available only after successful initialization. + available_workers.fetch_add(1, Ordering::Relaxed); + while let Ok(job) = work_rx.recv() { + // Mark worker as busy. + available_workers.fetch_sub(1, Ordering::Relaxed); + match job { AccountWorkerJob::AccountMultiproof { mut input, result_sender } => { let span = tracing::debug_span!( @@ -381,6 +400,9 @@ fn account_worker_loop( "Account multiproof completed" ); drop(_span_guard); + + // Mark worker as available again. + available_workers.fetch_add(1, Ordering::Relaxed); } AccountWorkerJob::BlindedAccountNode { path, result_sender } => { @@ -420,6 +442,9 @@ fn account_worker_loop( "Blinded account node completed" ); drop(_span_guard); + + // Mark worker as available again. + available_workers.fetch_add(1, Ordering::Relaxed); } } } @@ -866,6 +891,12 @@ pub struct ProofWorkerHandle { storage_work_tx: CrossbeamSender, /// Direct sender to account worker pool account_work_tx: CrossbeamSender, + /// Counter tracking available storage workers. Workers decrement when starting work, + /// increment when finishing. Used to determine whether to chunk multiproofs. 
+ storage_available_workers: Arc, + /// Counter tracking available account workers. Workers decrement when starting work, + /// increment when finishing. Used to determine whether to chunk multiproofs. + account_available_workers: Arc, } impl ProofWorkerHandle { @@ -893,6 +924,11 @@ impl ProofWorkerHandle { let (storage_work_tx, storage_work_rx) = unbounded::(); let (account_work_tx, account_work_rx) = unbounded::(); + // Initialize availability counters at zero. Each worker will increment when it + // successfully initializes, ensuring only healthy workers are counted. + let storage_available_workers = Arc::new(AtomicUsize::new(0)); + let account_available_workers = Arc::new(AtomicUsize::new(0)); + tracing::debug!( target: "trie::proof_task", storage_worker_count, @@ -910,6 +946,7 @@ impl ProofWorkerHandle { let view_clone = view.clone(); let task_ctx_clone = task_ctx.clone(); let work_rx_clone = storage_work_rx.clone(); + let storage_available_workers_clone = storage_available_workers.clone(); executor.spawn_blocking(move || { #[cfg(feature = "metrics")] @@ -921,6 +958,7 @@ impl ProofWorkerHandle { task_ctx_clone, work_rx_clone, worker_id, + storage_available_workers_clone, #[cfg(feature = "metrics")] metrics, ) @@ -946,6 +984,7 @@ impl ProofWorkerHandle { let task_ctx_clone = task_ctx.clone(); let work_rx_clone = account_work_rx.clone(); let storage_work_tx_clone = storage_work_tx.clone(); + let account_available_workers_clone = account_available_workers.clone(); executor.spawn_blocking(move || { #[cfg(feature = "metrics")] @@ -958,6 +997,7 @@ impl ProofWorkerHandle { work_rx_clone, storage_work_tx_clone, worker_id, + account_available_workers_clone, #[cfg(feature = "metrics")] metrics, ) @@ -972,7 +1012,12 @@ impl ProofWorkerHandle { drop(_guard); - Self::new_handle(storage_work_tx, account_work_tx) + Self::new_handle( + storage_work_tx, + account_work_tx, + storage_available_workers, + account_available_workers, + ) } /// Creates a new [`ProofWorkerHandle`] 
with direct access to worker pools. @@ -981,8 +1026,35 @@ impl ProofWorkerHandle { const fn new_handle( storage_work_tx: CrossbeamSender, account_work_tx: CrossbeamSender, + storage_available_workers: Arc, + account_available_workers: Arc, ) -> Self { - Self { storage_work_tx, account_work_tx } + Self { + storage_work_tx, + account_work_tx, + storage_available_workers, + account_available_workers, + } + } + + /// Returns true if there are available storage workers to process tasks. + pub fn has_available_storage_workers(&self) -> bool { + self.storage_available_workers.load(Ordering::Relaxed) > 0 + } + + /// Returns true if there are available account workers to process tasks. + pub fn has_available_account_workers(&self) -> bool { + self.account_available_workers.load(Ordering::Relaxed) > 0 + } + + /// Returns the number of pending storage tasks in the queue. + pub fn pending_storage_tasks(&self) -> usize { + self.storage_work_tx.len() + } + + /// Returns the number of pending account tasks in the queue. 
+ pub fn pending_account_tasks(&self) -> usize { + self.account_work_tx.len() } /// Dispatch a storage proof computation to storage worker pool From 52016d74de12b9275eb58ef05b133bce41fbd017 Mon Sep 17 00:00:00 2001 From: Ishika Choudhury <117741714+Rimeeeeee@users.noreply.github.com> Date: Thu, 23 Oct 2025 13:34:10 +0530 Subject: [PATCH 09/23] fixes --- Cargo.lock | 201 ++++++++++++++++++++++++++--------------------------- Cargo.toml | 26 +++---- 2 files changed, 113 insertions(+), 114 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 089ebbc43e9..2b7c6cf347f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -262,7 +262,7 @@ dependencies = [ [[package]] name = "alloy-evm" version = "0.22.3" -source = "git+https://github.com/Rimeeeeee/evm?branch=temp-fix#ce1015415dfa155a76504664a8d8022df6ba579e" +source = "git+https://github.com/Rimeeeeee/evm?branch=new-approach1#4be042c9ef8f3af15f94f2eac6441525811d57b1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -375,7 +375,7 @@ dependencies = [ [[package]] name = "alloy-op-evm" version = "0.22.3" -source = "git+https://github.com/Rimeeeeee/evm?branch=temp-fix#ce1015415dfa155a76504664a8d8022df6ba579e" +source = "git+https://github.com/Rimeeeeee/evm?branch=new-approach1#4be042c9ef8f3af15f94f2eac6441525811d57b1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -516,7 +516,7 @@ checksum = "64b728d511962dda67c1bc7ea7c03736ec275ed2cf4c35d9585298ac9ccf3b73" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -751,7 +751,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -767,7 +767,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", "syn-solidity", "tiny-keccak", ] @@ -784,7 +784,7 @@ dependencies = [ "macro-string", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", "syn-solidity", ] @@ -912,7 +912,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", 
- "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -997,7 +997,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -1139,7 +1139,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" dependencies = [ "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -1177,7 +1177,7 @@ dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -1266,7 +1266,7 @@ checksum = "213888f660fddcca0d257e88e54ac05bca01885f258ccdf695bafd77031bb69d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -1383,7 +1383,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -1394,7 +1394,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -1432,7 +1432,7 @@ checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -1559,7 +1559,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -1577,7 +1577,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -1767,7 +1767,7 @@ checksum = "9fd3f870829131332587f607a7ff909f1af5fc523fd1b192db55fbbdf52e8d3c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", "synstructure", ] @@ -1895,7 +1895,7 @@ checksum = "f9abbd1bc6865053c427f7198e6af43bfdedc55ab791faed4fbd361d789575ff" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -2126,7 +2126,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 
2.0.107", + "syn 2.0.108", ] [[package]] @@ -2634,7 +2634,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -2668,7 +2668,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -2682,7 +2682,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -2693,7 +2693,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core 0.20.11", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -2704,7 +2704,7 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core 0.21.3", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -2757,7 +2757,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976" dependencies = [ "data-encoding", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -2816,7 +2816,7 @@ checksum = "ef941ded77d15ca19b40374869ac6000af1c9f2a4c0f3d4c70926287e6364a8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -2827,7 +2827,7 @@ checksum = "1e567bd82dcff979e4b03460c307b3cdc9e96fde3d73bed1496d2bc75d9dd62a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -2848,7 +2848,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -2858,7 +2858,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" dependencies = [ "derive_builder_core", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -2879,7 +2879,7 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", 
"unicode-xid", ] @@ -2993,7 +2993,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -3072,7 +3072,7 @@ dependencies = [ "enum-ordinalize", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -3180,7 +3180,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -3200,7 +3200,7 @@ checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -3276,7 +3276,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -3924,7 +3924,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -4707,7 +4707,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -4764,7 +4764,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -4871,7 +4871,7 @@ dependencies = [ "indoc", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -5131,7 +5131,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -5502,7 +5502,7 @@ checksum = "1b27834086c65ec3f9387b096d66e99f221cf081c2b738042aa252bcd41204e3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -5568,7 +5568,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -5947,7 +5947,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 
2.0.107", + "syn 2.0.108", ] [[package]] @@ -6111,7 +6111,7 @@ dependencies = [ [[package]] name = "op-revm" version = "11.2.0" -source = "git+https://github.com/Rimeeeeee/revm?branch=rakita%2Fbal#4a2c5ceeceffa3646daae57d997e4cd4117c386c" +source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#44e67cc41e2f737b0fa9c3a31b5c539ea24cfdf6" dependencies = [ "auto_impl", "revm", @@ -6272,7 +6272,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -6407,7 +6407,7 @@ dependencies = [ "phf_shared 0.11.3", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -6420,7 +6420,7 @@ dependencies = [ "phf_shared 0.13.1", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -6458,7 +6458,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -6591,7 +6591,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -6642,14 +6642,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] name = "proc-macro2" -version = "1.0.101" +version = "1.0.102" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" +checksum = "8e0f6df8eaa422d97d72edcd152e1451618fed47fabbdbd5a8864167b1d4aff7" dependencies = [ "unicode-ident", ] @@ -6738,7 +6738,7 @@ checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -6749,7 +6749,7 @@ checksum = "095a99f75c69734802359b682be8daaf8980296731f6470434ea2c652af1dd30" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -6772,7 +6772,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -7117,7 +7117,7 @@ checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -7530,7 +7530,7 @@ dependencies = [ "proc-macro2", "quote", "similar-asserts", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -10815,7 +10815,7 @@ dependencies = [ [[package]] name = "revm" version = "30.2.0" -source = "git+https://github.com/Rimeeeeee/revm?branch=rakita%2Fbal#4a2c5ceeceffa3646daae57d997e4cd4117c386c" +source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#44e67cc41e2f737b0fa9c3a31b5c539ea24cfdf6" dependencies = [ "revm-bytecode", "revm-context", @@ -10833,7 +10833,7 @@ dependencies = [ [[package]] name = "revm-bytecode" version = "7.0.2" -source = "git+https://github.com/Rimeeeeee/revm?branch=rakita%2Fbal#4a2c5ceeceffa3646daae57d997e4cd4117c386c" +source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#44e67cc41e2f737b0fa9c3a31b5c539ea24cfdf6" dependencies = [ "bitvec", "phf 0.13.1", @@ -10844,7 +10844,7 @@ dependencies = [ [[package]] name = "revm-context" version = "10.1.2" -source = "git+https://github.com/Rimeeeeee/revm?branch=rakita%2Fbal#4a2c5ceeceffa3646daae57d997e4cd4117c386c" +source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#44e67cc41e2f737b0fa9c3a31b5c539ea24cfdf6" dependencies = [ "bitvec", "cfg-if", @@ -10860,7 +10860,7 @@ dependencies = [ [[package]] name = "revm-context-interface" version = "11.1.2" -source = "git+https://github.com/Rimeeeeee/revm?branch=rakita%2Fbal#4a2c5ceeceffa3646daae57d997e4cd4117c386c" +source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#44e67cc41e2f737b0fa9c3a31b5c539ea24cfdf6" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -10875,7 +10875,7 @@ dependencies = [ 
[[package]] name = "revm-database" version = "9.0.2" -source = "git+https://github.com/Rimeeeeee/revm?branch=rakita%2Fbal#4a2c5ceeceffa3646daae57d997e4cd4117c386c" +source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#44e67cc41e2f737b0fa9c3a31b5c539ea24cfdf6" dependencies = [ "alloy-eips", "revm-bytecode", @@ -10888,7 +10888,7 @@ dependencies = [ [[package]] name = "revm-database-interface" version = "8.0.3" -source = "git+https://github.com/Rimeeeeee/revm?branch=rakita%2Fbal#4a2c5ceeceffa3646daae57d997e4cd4117c386c" +source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#44e67cc41e2f737b0fa9c3a31b5c539ea24cfdf6" dependencies = [ "auto_impl", "either", @@ -10900,7 +10900,7 @@ dependencies = [ [[package]] name = "revm-handler" version = "11.2.0" -source = "git+https://github.com/Rimeeeeee/revm?branch=rakita%2Fbal#4a2c5ceeceffa3646daae57d997e4cd4117c386c" +source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#44e67cc41e2f737b0fa9c3a31b5c539ea24cfdf6" dependencies = [ "auto_impl", "derive-where", @@ -10918,7 +10918,7 @@ dependencies = [ [[package]] name = "revm-inspector" version = "11.2.0" -source = "git+https://github.com/Rimeeeeee/revm?branch=rakita%2Fbal#4a2c5ceeceffa3646daae57d997e4cd4117c386c" +source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#44e67cc41e2f737b0fa9c3a31b5c539ea24cfdf6" dependencies = [ "auto_impl", "either", @@ -10968,7 +10968,7 @@ dependencies = [ [[package]] name = "revm-interpreter" version = "28.0.0" -source = "git+https://github.com/Rimeeeeee/revm?branch=rakita%2Fbal#4a2c5ceeceffa3646daae57d997e4cd4117c386c" +source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#44e67cc41e2f737b0fa9c3a31b5c539ea24cfdf6" dependencies = [ "revm-bytecode", "revm-context-interface", @@ -10980,7 +10980,7 @@ dependencies = [ [[package]] name = "revm-precompile" version = "28.1.1" -source = "git+https://github.com/Rimeeeeee/revm?branch=rakita%2Fbal#4a2c5ceeceffa3646daae57d997e4cd4117c386c" 
+source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#44e67cc41e2f737b0fa9c3a31b5c539ea24cfdf6" dependencies = [ "ark-bls12-381", "ark-bn254", @@ -11004,7 +11004,7 @@ dependencies = [ [[package]] name = "revm-primitives" version = "21.0.1" -source = "git+https://github.com/Rimeeeeee/revm?branch=rakita%2Fbal#4a2c5ceeceffa3646daae57d997e4cd4117c386c" +source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#44e67cc41e2f737b0fa9c3a31b5c539ea24cfdf6" dependencies = [ "alloy-primitives", "num_enum", @@ -11015,11 +11015,10 @@ dependencies = [ [[package]] name = "revm-state" version = "8.0.2" -source = "git+https://github.com/Rimeeeeee/revm?branch=rakita%2Fbal#4a2c5ceeceffa3646daae57d997e4cd4117c386c" +source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#44e67cc41e2f737b0fa9c3a31b5c539ea24cfdf6" dependencies = [ "alloy-eip7928", "bitflags 2.10.0", - "indexmap 2.12.0", "revm-bytecode", "revm-primitives", "serde", @@ -11156,7 +11155,7 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.1", - "syn 2.0.107", + "syn 2.0.108", "unicode-ident", ] @@ -11597,7 +11596,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -11674,7 +11673,7 @@ dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -11998,7 +11997,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -12010,7 +12009,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -12032,9 +12031,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.107" +version = "2.0.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a26dbd934e5451d21ef060c018dae56fc073894c5a7896f882928a76e6d081b" +checksum = 
"da58917d35242480a05c2897064da0a80589a2a0476c9a3f2fdc83b53502e917" dependencies = [ "proc-macro2", "quote", @@ -12050,7 +12049,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -12070,7 +12069,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -12151,7 +12150,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -12162,7 +12161,7 @@ checksum = "5c89e72a01ed4c579669add59014b9a524d609c0c88c6a585ce37485879f6ffb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", "test-case-core", ] @@ -12202,7 +12201,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -12250,7 +12249,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -12261,7 +12260,7 @@ checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -12426,7 +12425,7 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -12681,7 +12680,7 @@ checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -12841,7 +12840,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -13122,7 +13121,7 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -13203,7 +13202,7 @@ dependencies = [ 
"log", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", "wasm-bindgen-shared", ] @@ -13238,7 +13237,7 @@ checksum = "9f07d2f20d4da7b26400c9f4a0511e6e0345b040694e8a75bd41d578fa4421d7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -13493,7 +13492,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -13504,7 +13503,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -13515,7 +13514,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -13526,7 +13525,7 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -14040,7 +14039,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", "synstructure", ] @@ -14052,7 +14051,7 @@ checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", "synstructure", ] @@ -14073,7 +14072,7 @@ checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -14093,7 +14092,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", "synstructure", ] @@ -14114,7 +14113,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", 
- "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -14158,7 +14157,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -14169,7 +14168,7 @@ checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 923017818e5..2ead9e5a87a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -768,19 +768,19 @@ alloy-transport-ws = { git = "https://github.com/Soubhik-10/alloy", branch = "ba # op-alloy-rpc-jsonrpsee = { git = "https://github.com/alloy-rs/op-alloy", rev = "a79d6fc" } # # revm-inspectors = { git = "https://github.com/paradigmxyz/revm-inspectors", rev = "1207e33" } -revm = { git = "https://github.com/Rimeeeeee/revm", branch = "rakita/bal" } -alloy-evm = { git = "https://github.com/Rimeeeeee/evm", branch = "temp-fix" } -alloy-op-evm = { git = "https://github.com/Rimeeeeee/evm", branch = "temp-fix" } -revm-bytecode = { git = "https://github.com/Rimeeeeee/revm", branch = "rakita/bal" } -revm-database = { git = "https://github.com/Rimeeeeee/revm", branch = "rakita/bal" } -revm-state = { git = "https://github.com/Rimeeeeee/revm", branch = "rakita/bal" } -revm-primitives = { git = "https://github.com/Rimeeeeee/revm", branch = "rakita/bal" } -revm-interpreter = { git = "https://github.com/Rimeeeeee/revm", branch = "rakita/bal" } -revm-inspector = { git = "https://github.com/Rimeeeeee/revm", branch = "rakita/bal" } -revm-context = { git = "https://github.com/Rimeeeeee/revm", branch = "rakita/bal" } -revm-context-interface = { git = "https://github.com/Rimeeeeee/revm", branch = "rakita/bal" } -revm-database-interface = { git = "https://github.com/Rimeeeeee/revm", branch = "rakita/bal" } -op-revm = { git = "https://github.com/Rimeeeeee/revm", branch = "rakita/bal" } +revm = { git = "https://github.com/bluealloy/revm", 
branch = "rakita/bal" } +alloy-evm = { git = "https://github.com/Rimeeeeee/evm", branch = "new-approach1" } +alloy-op-evm = { git = "https://github.com/Rimeeeeee/evm", branch = "new-approach1" } +revm-bytecode = { git = "https://github.com/bluealloy/revm", branch = "rakita/bal" } +revm-database = { git = "https://github.com/bluealloy/revm", branch = "rakita/bal" } +revm-state = { git = "https://github.com/bluealloy/revm", branch = "rakita/bal" } +revm-primitives = { git = "https://github.com/bluealloy/revm", branch = "rakita/bal" } +revm-interpreter = { git = "https://github.com/bluealloy/revm", branch = "rakita/bal" } +revm-inspector = { git = "https://github.com/bluealloy/revm", branch = "rakita/bal" } +revm-context = { git = "https://github.com/bluealloy/revm", branch = "rakita/bal" } +revm-context-interface = { git = "https://github.com/bluealloy/revm", branch = "rakita/bal" } +revm-database-interface = { git = "https://github.com/bluealloy/revm", branch = "rakita/bal" } +op-revm = { git = "https://github.com/bluealloy/revm", branch = "rakita/bal" } # # jsonrpsee = { git = "https://github.com/paradigmxyz/jsonrpsee", branch = "matt/make-rpc-service-pub" } # jsonrpsee-core = { git = "https://github.com/paradigmxyz/jsonrpsee", branch = "matt/make-rpc-service-pub" } From b2236d1db7826921ade7cff141848ef265c2bce6 Mon Sep 17 00:00:00 2001 From: Fallengirl <155266340+Fallengirl@users.noreply.github.com> Date: Thu, 23 Oct 2025 13:20:59 +0200 Subject: [PATCH 10/23] docs: correct Payment tx type from 0x7E to 0x2A (#19255) --- examples/custom-node/src/pool.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/custom-node/src/pool.rs b/examples/custom-node/src/pool.rs index 0959b3bcae0..8828803a0f3 100644 --- a/examples/custom-node/src/pool.rs +++ b/examples/custom-node/src/pool.rs @@ -17,7 +17,7 @@ pub enum CustomPooledTransaction { /// A regular Optimism transaction as defined by [`OpPooledTransaction`]. 
#[envelope(flatten)] Op(OpPooledTransaction), - /// A [`TxPayment`] tagged with type 0x7E. + /// A [`TxPayment`] tagged with type 0x2A (decimal 42). #[envelope(ty = 42)] Payment(Signed), } From ce876a96ad30509fb00c2d2c9a7d4cd5b4470a9f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 23 Oct 2025 13:39:12 +0200 Subject: [PATCH 11/23] fix: use network id in p2p command (#19252) --- crates/cli/commands/src/p2p/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/cli/commands/src/p2p/mod.rs b/crates/cli/commands/src/p2p/mod.rs index 792d4533856..c72ceca78e6 100644 --- a/crates/cli/commands/src/p2p/mod.rs +++ b/crates/cli/commands/src/p2p/mod.rs @@ -192,6 +192,7 @@ impl DownloadArgs { let net = NetworkConfigBuilder::::new(p2p_secret_key) .peer_config(config.peers_config_with_basic_nodes_from_file(None)) .external_ip_resolver(self.network.nat) + .network_id(self.network.network_id) .boot_nodes(boot_nodes.clone()) .apply(|builder| { self.network.discovery.apply_to_builder(builder, rlpx_socket, boot_nodes) From 71f91cf4eb4b7ab4512885b8c00096b5d4fe10b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Roman=20Hodul=C3=A1k?= Date: Thu, 23 Oct 2025 13:43:24 +0200 Subject: [PATCH 12/23] feat(prune): Add an empty `reth-prune-db` crate (#19232) --- Cargo.lock | 4 ++++ Cargo.toml | 1 + crates/prune/db/Cargo.toml | 15 +++++++++++++++ crates/prune/db/src/lib.rs | 1 + 4 files changed, 21 insertions(+) create mode 100644 crates/prune/db/Cargo.toml create mode 100644 crates/prune/db/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 90aed93b946..6839523354a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9812,6 +9812,10 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-prune-db" +version = "1.8.2" + [[package]] name = "reth-prune-types" version = "1.8.2" diff --git a/Cargo.toml b/Cargo.toml index ae7956ef489..324135b2233 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -93,6 +93,7 @@ members = [ "crates/payload/util/", "crates/primitives-traits/", "crates/primitives/", + 
"crates/prune/db", "crates/prune/prune", "crates/prune/types", "crates/ress/protocol", diff --git a/crates/prune/db/Cargo.toml b/crates/prune/db/Cargo.toml new file mode 100644 index 00000000000..269a87bf7b6 --- /dev/null +++ b/crates/prune/db/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "reth-prune-db" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +exclude.workspace = true +description = "Database integration with prune implementation" + +[dependencies] + +[lints] +workspace = true diff --git a/crates/prune/db/src/lib.rs b/crates/prune/db/src/lib.rs new file mode 100644 index 00000000000..ef777085e54 --- /dev/null +++ b/crates/prune/db/src/lib.rs @@ -0,0 +1 @@ +//! An integration of `reth-prune` with `reth-db`. From c54719145bdab1e78c0ccd854e7e28b0b8019d15 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 23 Oct 2025 14:43:56 +0200 Subject: [PATCH 13/23] fix: use known paris activation blocks in genesis parsing (#19258) --- crates/chainspec/src/spec.rs | 143 +++++++++++++++++++++++++++++------ 1 file changed, 121 insertions(+), 22 deletions(-) diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index a0cccfcc449..e8d16886aac 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -3,7 +3,12 @@ use alloy_evm::eth::spec::EthExecutorSpec; use crate::{ constants::{MAINNET_DEPOSIT_CONTRACT, MAINNET_PRUNE_DELETE_LIMIT}, - holesky, hoodi, sepolia, EthChainSpec, + ethereum::SEPOLIA_PARIS_TTD, + holesky, hoodi, + mainnet::{MAINNET_PARIS_BLOCK, MAINNET_PARIS_TTD}, + sepolia, + sepolia::SEPOLIA_PARIS_BLOCK, + EthChainSpec, }; use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_chains::{Chain, NamedChain}; @@ -100,7 +105,7 @@ pub static MAINNET: LazyLock> = LazyLock::new(|| { genesis, // paris_block_and_final_difficulty: Some(( - 15537394, + MAINNET_PARIS_BLOCK, 
U256::from(58_750_003_716_598_352_816_469u128), )), hardforks, @@ -127,7 +132,10 @@ pub static SEPOLIA: LazyLock> = LazyLock::new(|| { ), genesis, // - paris_block_and_final_difficulty: Some((1450409, U256::from(17_000_018_015_853_232u128))), + paris_block_and_final_difficulty: Some(( + SEPOLIA_PARIS_BLOCK, + U256::from(17_000_018_015_853_232u128), + )), hardforks, // https://sepolia.etherscan.io/tx/0x025ecbf81a2f1220da6285d1701dc89fb5a956b62562ee922e1a9efd73eb4b14 deposit_contract: Some(DepositContract::new( @@ -678,26 +686,50 @@ impl From for ChainSpec { // We expect no new networks to be configured with the merge, so we ignore the TTD field // and merge netsplit block from external genesis files. All existing networks that have // merged should have a static ChainSpec already (namely mainnet and sepolia). - let paris_block_and_final_difficulty = - if let Some(ttd) = genesis.config.terminal_total_difficulty { - hardforks.push(( - EthereumHardfork::Paris.boxed(), - ForkCondition::TTD { - // NOTE: this will not work properly if the merge is not activated at - // genesis, and there is no merge netsplit block - activation_block_number: genesis - .config - .merge_netsplit_block - .unwrap_or_default(), - total_difficulty: ttd, - fork_block: genesis.config.merge_netsplit_block, - }, - )); + let paris_block_and_final_difficulty = if let Some(ttd) = + genesis.config.terminal_total_difficulty + { + hardforks.push(( + EthereumHardfork::Paris.boxed(), + ForkCondition::TTD { + // NOTE: this will not work properly if the merge is not activated at + // genesis, and there is no merge netsplit block + activation_block_number: genesis + .config + .merge_netsplit_block + .or_else(|| { + // due to this limitation we can't determine the merge block, + // this is the case for perfnet testing for example + // at the time of this fix, only two networks transitioned: MAINNET + + // SEPOLIA and this parsing from genesis is used for shadowforking, so + // we can reasonably assume that if 
the TTD and the chainid matches + // those networks we use the activation + // blocks of those networks + match genesis.config.chain_id { + 1 => { + if ttd == MAINNET_PARIS_TTD { + return Some(MAINNET_PARIS_BLOCK) + } + } + 11155111 => { + if ttd == SEPOLIA_PARIS_TTD { + return Some(SEPOLIA_PARIS_BLOCK) + } + } + _ => {} + }; + None + }) + .unwrap_or_default(), + total_difficulty: ttd, + fork_block: genesis.config.merge_netsplit_block, + }, + )); - genesis.config.merge_netsplit_block.map(|block| (block, ttd)) - } else { - None - }; + genesis.config.merge_netsplit_block.map(|block| (block, ttd)) + } else { + None + }; // Time-based hardforks let time_hardfork_opts = [ @@ -2647,4 +2679,71 @@ Post-merge hard forks (timestamp based): }; assert_eq!(hardfork_params, expected); } + + #[test] + fn parse_perf_net_genesis() { + let s = r#"{ + "config": { + "chainId": 1, + "homesteadBlock": 1150000, + "daoForkBlock": 1920000, + "daoForkSupport": true, + "eip150Block": 2463000, + "eip150Hash": "0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0", + "eip155Block": 2675000, + "eip158Block": 2675000, + "byzantiumBlock": 4370000, + "constantinopleBlock": 7280000, + "petersburgBlock": 7280000, + "istanbulBlock": 9069000, + "muirGlacierBlock": 9200000, + "berlinBlock": 12244000, + "londonBlock": 12965000, + "arrowGlacierBlock": 13773000, + "grayGlacierBlock": 15050000, + "terminalTotalDifficulty": 58750000000000000000000, + "terminalTotalDifficultyPassed": true, + "shanghaiTime": 1681338455, + "cancunTime": 1710338135, + "pragueTime": 1746612311, + "ethash": {}, + "depositContractAddress": "0x00000000219ab540356cBB839Cbe05303d7705Fa", + "blobSchedule": { + "cancun": { + "target": 3, + "max": 6, + "baseFeeUpdateFraction": 3338477 + }, + "prague": { + "target": 6, + "max": 9, + "baseFeeUpdateFraction": 5007716 + } + } + }, + "nonce": "0x42", + "timestamp": "0x0", + "extraData": "0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa", + "gasLimit": 
"0x1388", + "difficulty": "0x400000000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "coinbase": "0x0000000000000000000000000000000000000000", + "number": "0x0", + "gasUsed": "0x0", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "baseFeePerGas": null +}"#; + + let genesis = serde_json::from_str::(s).unwrap(); + let chainspec = ChainSpec::from_genesis(genesis); + let activation = chainspec.hardforks.fork(EthereumHardfork::Paris); + assert_eq!( + activation, + ForkCondition::TTD { + activation_block_number: MAINNET_PARIS_BLOCK, + total_difficulty: MAINNET_PARIS_TTD, + fork_block: None, + } + ) + } } From 75931f8772a21b260927f1acce85f8e6a33f034d Mon Sep 17 00:00:00 2001 From: Fallengirl <155266340+Fallengirl@users.noreply.github.com> Date: Thu, 23 Oct 2025 15:13:03 +0200 Subject: [PATCH 14/23] chore: align env filter comment with configured directives (#19237) --- crates/tracing/src/layers.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/crates/tracing/src/layers.rs b/crates/tracing/src/layers.rs index 156bd8c8253..210c0066308 100644 --- a/crates/tracing/src/layers.rs +++ b/crates/tracing/src/layers.rs @@ -18,8 +18,9 @@ pub type FileWorkerGuard = tracing_appender::non_blocking::WorkerGuard; /// A boxed tracing [Layer]. pub(crate) type BoxedLayer = Box + Send + Sync>; -/// Default [directives](Directive) for [`EnvFilter`] which disables high-frequency debug logs from -/// `hyper`, `hickory-resolver`, `jsonrpsee-server`, and `discv5`. +/// Default [directives](Directive) for [`EnvFilter`] which disable high-frequency debug logs from +/// dependencies such as `hyper`, `hickory-resolver`, `hickory_proto`, `discv5`, `jsonrpsee-server`, +/// the `opentelemetry_*` crates, and `hyper_util::client::legacy::pool`. 
const DEFAULT_ENV_FILTER_DIRECTIVES: [&str; 9] = [ "hyper::proto::h1=off", "hickory_resolver=off", From 3d3a05386a598500c926d3c1319beba7e1616ea2 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Thu, 23 Oct 2025 14:31:15 +0100 Subject: [PATCH 15/23] refactor(static-file): remove unused segments (#19209) --- .../static-file/src/segments/headers.rs | 54 ----------- .../static-file/src/segments/mod.rs | 6 -- .../static-file/src/segments/transactions.rs | 60 ------------ .../static-file/src/static_file_producer.rs | 95 ++++--------------- crates/static-file/types/src/lib.rs | 94 +++--------------- .../src/providers/static_file/manager.rs | 2 - 6 files changed, 33 insertions(+), 278 deletions(-) delete mode 100644 crates/static-file/static-file/src/segments/headers.rs delete mode 100644 crates/static-file/static-file/src/segments/transactions.rs diff --git a/crates/static-file/static-file/src/segments/headers.rs b/crates/static-file/static-file/src/segments/headers.rs deleted file mode 100644 index 990e33ee52a..00000000000 --- a/crates/static-file/static-file/src/segments/headers.rs +++ /dev/null @@ -1,54 +0,0 @@ -use crate::segments::Segment; -use alloy_primitives::BlockNumber; -use reth_codecs::Compact; -use reth_db_api::{cursor::DbCursorRO, table::Value, tables, transaction::DbTx}; -use reth_primitives_traits::NodePrimitives; -use reth_provider::{providers::StaticFileWriter, DBProvider, StaticFileProviderFactory}; -use reth_static_file_types::StaticFileSegment; -use reth_storage_errors::provider::ProviderResult; -use std::ops::RangeInclusive; - -/// Static File segment responsible for [`StaticFileSegment::Headers`] part of data. 
-#[derive(Debug, Default)] -pub struct Headers; - -impl Segment for Headers -where - Provider: StaticFileProviderFactory> - + DBProvider, -{ - fn segment(&self) -> StaticFileSegment { - StaticFileSegment::Headers - } - - fn copy_to_static_files( - &self, - provider: Provider, - block_range: RangeInclusive, - ) -> ProviderResult<()> { - let static_file_provider = provider.static_file_provider(); - let mut static_file_writer = - static_file_provider.get_writer(*block_range.start(), StaticFileSegment::Headers)?; - - let mut headers_cursor = provider - .tx_ref() - .cursor_read::::BlockHeader>>( - )?; - let headers_walker = headers_cursor.walk_range(block_range.clone())?; - - let mut canonical_headers_cursor = - provider.tx_ref().cursor_read::()?; - let canonical_headers_walker = canonical_headers_cursor.walk_range(block_range)?; - - for (header_entry, canonical_header_entry) in headers_walker.zip(canonical_headers_walker) { - let (header_block, header) = header_entry?; - let (canonical_header_block, canonical_header) = canonical_header_entry?; - - debug_assert_eq!(header_block, canonical_header_block); - - static_file_writer.append_header(&header, &canonical_header)?; - } - - Ok(()) - } -} diff --git a/crates/static-file/static-file/src/segments/mod.rs b/crates/static-file/static-file/src/segments/mod.rs index fc79effdd5a..a1499a2eaa8 100644 --- a/crates/static-file/static-file/src/segments/mod.rs +++ b/crates/static-file/static-file/src/segments/mod.rs @@ -1,11 +1,5 @@ //! `StaticFile` segment implementations and utilities. 
-mod transactions; -pub use transactions::Transactions; - -mod headers; -pub use headers::Headers; - mod receipts; pub use receipts::Receipts; diff --git a/crates/static-file/static-file/src/segments/transactions.rs b/crates/static-file/static-file/src/segments/transactions.rs deleted file mode 100644 index 74cb58ed708..00000000000 --- a/crates/static-file/static-file/src/segments/transactions.rs +++ /dev/null @@ -1,60 +0,0 @@ -use crate::segments::Segment; -use alloy_primitives::BlockNumber; -use reth_codecs::Compact; -use reth_db_api::{cursor::DbCursorRO, table::Value, tables, transaction::DbTx}; -use reth_primitives_traits::NodePrimitives; -use reth_provider::{ - providers::StaticFileWriter, BlockReader, DBProvider, StaticFileProviderFactory, -}; -use reth_static_file_types::StaticFileSegment; -use reth_storage_errors::provider::{ProviderError, ProviderResult}; -use std::ops::RangeInclusive; - -/// Static File segment responsible for [`StaticFileSegment::Transactions`] part of data. -#[derive(Debug, Default)] -pub struct Transactions; - -impl Segment for Transactions -where - Provider: StaticFileProviderFactory> - + DBProvider - + BlockReader, -{ - fn segment(&self) -> StaticFileSegment { - StaticFileSegment::Transactions - } - - /// Write transactions from database table [`tables::Transactions`] to static files with segment - /// [`StaticFileSegment::Transactions`] for the provided block range. - fn copy_to_static_files( - &self, - provider: Provider, - block_range: RangeInclusive, - ) -> ProviderResult<()> { - let static_file_provider = provider.static_file_provider(); - let mut static_file_writer = static_file_provider - .get_writer(*block_range.start(), StaticFileSegment::Transactions)?; - - for block in block_range { - static_file_writer.increment_block(block)?; - - let block_body_indices = provider - .block_body_indices(block)? 
- .ok_or(ProviderError::BlockBodyIndicesNotFound(block))?; - - let mut transactions_cursor = provider.tx_ref().cursor_read::::SignedTx, - >>()?; - let transactions_walker = - transactions_cursor.walk_range(block_body_indices.tx_num_range())?; - - for entry in transactions_walker { - let (tx_number, transaction) = entry?; - - static_file_writer.append_transaction(tx_number, &transaction)?; - } - } - - Ok(()) - } -} diff --git a/crates/static-file/static-file/src/static_file_producer.rs b/crates/static-file/static-file/src/static_file_producer.rs index 185fbf7c498..2e7aa4b9df4 100644 --- a/crates/static-file/static-file/src/static_file_producer.rs +++ b/crates/static-file/static-file/src/static_file_producer.rs @@ -131,12 +131,6 @@ where let mut segments = Vec::<(Box>, RangeInclusive)>::new(); - if let Some(block_range) = targets.transactions.clone() { - segments.push((Box::new(segments::Transactions), block_range)); - } - if let Some(block_range) = targets.headers.clone() { - segments.push((Box::new(segments::Headers), block_range)); - } if let Some(block_range) = targets.receipts.clone() { segments.push((Box::new(segments::Receipts), block_range)); } @@ -178,16 +172,11 @@ where /// Returns highest block numbers for all static file segments. 
pub fn copy_to_static_files(&self) -> ProviderResult { let provider = self.provider.database_provider_ro()?; - let stages_checkpoints = [StageId::Headers, StageId::Execution, StageId::Bodies] - .into_iter() + let stages_checkpoints = std::iter::once(StageId::Execution) .map(|stage| provider.get_stage_checkpoint(stage).map(|c| c.map(|c| c.block_number))) .collect::, _>>()?; - let highest_static_files = HighestStaticFiles { - headers: stages_checkpoints[0], - receipts: stages_checkpoints[1], - transactions: stages_checkpoints[2], - }; + let highest_static_files = HighestStaticFiles { receipts: stages_checkpoints[0] }; let targets = self.get_static_file_targets(highest_static_files)?; self.run(targets)?; @@ -204,26 +193,17 @@ where let highest_static_files = self.provider.static_file_provider().get_highest_static_files(); let targets = StaticFileTargets { - headers: finalized_block_numbers.headers.and_then(|finalized_block_number| { - self.get_static_file_target(highest_static_files.headers, finalized_block_number) - }), - receipts: finalized_block_numbers - .receipts - // StaticFile receipts only if they're not pruned according to the user - // configuration - .filter(|_| !self.prune_modes.has_receipts_pruning()) - .and_then(|finalized_block_number| { + // StaticFile receipts only if they're not pruned according to the user configuration + receipts: if self.prune_modes.receipts.is_none() { + finalized_block_numbers.receipts.and_then(|finalized_block_number| { self.get_static_file_target( highest_static_files.receipts, finalized_block_number, ) - }), - transactions: finalized_block_numbers.transactions.and_then(|finalized_block_number| { - self.get_static_file_target( - highest_static_files.transactions, - finalized_block_number, - ) - }), + }) + } else { + None + }, }; trace!( @@ -313,69 +293,36 @@ mod tests { StaticFileProducerInner::new(provider_factory.clone(), PruneModes::default()); let targets = static_file_producer - .get_static_file_targets(HighestStaticFiles 
{ - headers: Some(1), - receipts: Some(1), - transactions: Some(1), - }) + .get_static_file_targets(HighestStaticFiles { receipts: Some(1) }) .expect("get static file targets"); - assert_eq!( - targets, - StaticFileTargets { - headers: Some(0..=1), - receipts: Some(0..=1), - transactions: Some(0..=1) - } - ); + assert_eq!(targets, StaticFileTargets { receipts: Some(0..=1) }); assert_matches!(static_file_producer.run(targets), Ok(_)); assert_eq!( provider_factory.static_file_provider().get_highest_static_files(), - HighestStaticFiles { headers: Some(1), receipts: Some(1), transactions: Some(1) } + HighestStaticFiles { receipts: Some(1) } ); let targets = static_file_producer - .get_static_file_targets(HighestStaticFiles { - headers: Some(3), - receipts: Some(3), - transactions: Some(3), - }) + .get_static_file_targets(HighestStaticFiles { receipts: Some(3) }) .expect("get static file targets"); - assert_eq!( - targets, - StaticFileTargets { - headers: Some(2..=3), - receipts: Some(2..=3), - transactions: Some(2..=3) - } - ); + assert_eq!(targets, StaticFileTargets { receipts: Some(2..=3) }); assert_matches!(static_file_producer.run(targets), Ok(_)); assert_eq!( provider_factory.static_file_provider().get_highest_static_files(), - HighestStaticFiles { headers: Some(3), receipts: Some(3), transactions: Some(3) } + HighestStaticFiles { receipts: Some(3) } ); let targets = static_file_producer - .get_static_file_targets(HighestStaticFiles { - headers: Some(4), - receipts: Some(4), - transactions: Some(4), - }) + .get_static_file_targets(HighestStaticFiles { receipts: Some(4) }) .expect("get static file targets"); - assert_eq!( - targets, - StaticFileTargets { - headers: Some(4..=4), - receipts: Some(4..=4), - transactions: Some(4..=4) - } - ); + assert_eq!(targets, StaticFileTargets { receipts: Some(4..=4) }); assert_matches!( static_file_producer.run(targets), Err(ProviderError::BlockBodyIndicesNotFound(4)) ); assert_eq!( 
provider_factory.static_file_provider().get_highest_static_files(), - HighestStaticFiles { headers: Some(3), receipts: Some(3), transactions: Some(3) } + HighestStaticFiles { receipts: Some(3) } ); } @@ -399,11 +346,7 @@ mod tests { std::thread::sleep(Duration::from_millis(100)); } let targets = locked_producer - .get_static_file_targets(HighestStaticFiles { - headers: Some(1), - receipts: Some(1), - transactions: Some(1), - }) + .get_static_file_targets(HighestStaticFiles { receipts: Some(1) }) .expect("get static file targets"); assert_matches!(locked_producer.run(targets.clone()), Ok(_)); tx.send(targets).unwrap(); diff --git a/crates/static-file/types/src/lib.rs b/crates/static-file/types/src/lib.rs index 53be4f6d1c1..9606b0ec98b 100644 --- a/crates/static-file/types/src/lib.rs +++ b/crates/static-file/types/src/lib.rs @@ -27,39 +27,15 @@ pub const DEFAULT_BLOCKS_PER_STATIC_FILE: u64 = 500_000; /// Highest static file block numbers, per data segment. #[derive(Debug, Clone, Copy, Default, Eq, PartialEq)] pub struct HighestStaticFiles { - /// Highest static file block of headers, inclusive. - /// If [`None`], no static file is available. - pub headers: Option, /// Highest static file block of receipts, inclusive. /// If [`None`], no static file is available. pub receipts: Option, - /// Highest static file block of transactions, inclusive. - /// If [`None`], no static file is available. 
- pub transactions: Option, } impl HighestStaticFiles { - /// Returns the highest static file if it exists for a segment - pub const fn highest(&self, segment: StaticFileSegment) -> Option { - match segment { - StaticFileSegment::Headers => self.headers, - StaticFileSegment::Transactions => self.transactions, - StaticFileSegment::Receipts => self.receipts, - } - } - - /// Returns a mutable reference to a static file segment - pub const fn as_mut(&mut self, segment: StaticFileSegment) -> &mut Option { - match segment { - StaticFileSegment::Headers => &mut self.headers, - StaticFileSegment::Transactions => &mut self.transactions, - StaticFileSegment::Receipts => &mut self.receipts, - } - } - /// Returns an iterator over all static file segments fn iter(&self) -> impl Iterator> { - [self.headers, self.transactions, self.receipts].into_iter() + [self.receipts].into_iter() } /// Returns the minimum block of all segments. @@ -76,36 +52,28 @@ impl HighestStaticFiles { /// Static File targets, per data segment, measured in [`BlockNumber`]. #[derive(Debug, Clone, Eq, PartialEq)] pub struct StaticFileTargets { - /// Targeted range of headers. - pub headers: Option>, /// Targeted range of receipts. pub receipts: Option>, - /// Targeted range of transactions. - pub transactions: Option>, } impl StaticFileTargets { /// Returns `true` if any of the targets are [Some]. pub const fn any(&self) -> bool { - self.headers.is_some() || self.receipts.is_some() || self.transactions.is_some() + self.receipts.is_some() } /// Returns `true` if all targets are either [`None`] or has beginning of the range equal to the /// highest static file. 
pub fn is_contiguous_to_highest_static_files(&self, static_files: HighestStaticFiles) -> bool { - [ - (self.headers.as_ref(), static_files.headers), - (self.receipts.as_ref(), static_files.receipts), - (self.transactions.as_ref(), static_files.transactions), - ] - .iter() - .all(|(target_block_range, highest_static_file_block)| { - target_block_range.is_none_or(|target_block_range| { - *target_block_range.start() == - highest_static_file_block - .map_or(0, |highest_static_file_block| highest_static_file_block + 1) - }) - }) + core::iter::once(&(self.receipts.as_ref(), static_files.receipts)).all( + |(target_block_range, highest_static_file_block)| { + target_block_range.is_none_or(|target_block_range| { + *target_block_range.start() == + highest_static_file_block + .map_or(0, |highest_static_file_block| highest_static_file_block + 1) + }) + }, + ) } } @@ -123,42 +91,9 @@ pub const fn find_fixed_range( mod tests { use super::*; - #[test] - fn test_highest_static_files_highest() { - let files = - HighestStaticFiles { headers: Some(100), receipts: Some(200), transactions: None }; - - // Test for headers segment - assert_eq!(files.highest(StaticFileSegment::Headers), Some(100)); - - // Test for receipts segment - assert_eq!(files.highest(StaticFileSegment::Receipts), Some(200)); - - // Test for transactions segment - assert_eq!(files.highest(StaticFileSegment::Transactions), None); - } - - #[test] - fn test_highest_static_files_as_mut() { - let mut files = HighestStaticFiles::default(); - - // Modify headers value - *files.as_mut(StaticFileSegment::Headers) = Some(150); - assert_eq!(files.headers, Some(150)); - - // Modify receipts value - *files.as_mut(StaticFileSegment::Receipts) = Some(250); - assert_eq!(files.receipts, Some(250)); - - // Modify transactions value - *files.as_mut(StaticFileSegment::Transactions) = Some(350); - assert_eq!(files.transactions, Some(350)); - } - #[test] fn test_highest_static_files_min() { - let files = - HighestStaticFiles { headers: 
Some(300), receipts: Some(100), transactions: None }; + let files = HighestStaticFiles { receipts: Some(100) }; // Minimum value among the available segments assert_eq!(files.min_block_num(), Some(100)); @@ -170,11 +105,10 @@ mod tests { #[test] fn test_highest_static_files_max() { - let files = - HighestStaticFiles { headers: Some(300), receipts: Some(100), transactions: Some(500) }; + let files = HighestStaticFiles { receipts: Some(100) }; // Maximum value among the available segments - assert_eq!(files.max_block_num(), Some(500)); + assert_eq!(files.max_block_num(), Some(100)); let empty_files = HighestStaticFiles::default(); // No values, should return None diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 76fa45f5a56..d066a704a24 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -1062,9 +1062,7 @@ impl StaticFileProvider { /// Gets the highest static file block for all segments. pub fn get_highest_static_files(&self) -> HighestStaticFiles { HighestStaticFiles { - headers: self.get_highest_static_file_block(StaticFileSegment::Headers), receipts: self.get_highest_static_file_block(StaticFileSegment::Receipts), - transactions: self.get_highest_static_file_block(StaticFileSegment::Transactions), } } From f3b9349d6f0722b1c089ea4b9e0c1fb5615025d7 Mon Sep 17 00:00:00 2001 From: Ragnar Date: Thu, 23 Oct 2025 15:34:51 +0200 Subject: [PATCH 16/23] docs: add usage examples and documentation to NoopConsensus (#19194) --- crates/consensus/consensus/src/noop.rs | 28 ++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/crates/consensus/consensus/src/noop.rs b/crates/consensus/consensus/src/noop.rs index 259fae27d67..3d6818ca306 100644 --- a/crates/consensus/consensus/src/noop.rs +++ b/crates/consensus/consensus/src/noop.rs @@ -1,9 +1,32 @@ +//! 
A consensus implementation that does nothing. +//! +//! This module provides `NoopConsensus`, a consensus implementation that performs no validation +//! and always returns `Ok(())` for all validation methods. Useful for testing and scenarios +//! where consensus validation is not required. +//! +//! # Examples +//! +//! ```rust +//! use reth_consensus::noop::NoopConsensus; +//! use std::sync::Arc; +//! +//! let consensus = NoopConsensus::default(); +//! let consensus_arc = NoopConsensus::arc(); +//! ``` +//! +//! # Warning +//! +//! **Not for production use** - provides no security guarantees or consensus validation. + use crate::{Consensus, ConsensusError, FullConsensus, HeaderValidator}; use alloc::sync::Arc; use reth_execution_types::BlockExecutionResult; use reth_primitives_traits::{Block, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader}; /// A Consensus implementation that does nothing. +/// +/// Always returns `Ok(())` for all validation methods. Suitable for testing and scenarios +/// where consensus validation is not required. #[derive(Debug, Copy, Clone, Default)] #[non_exhaustive] pub struct NoopConsensus; @@ -16,10 +39,12 @@ impl NoopConsensus { } impl HeaderValidator for NoopConsensus { + /// Validates a header (no-op implementation). fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { Ok(()) } + /// Validates a header against its parent (no-op implementation). fn validate_header_against_parent( &self, _header: &SealedHeader, @@ -32,6 +57,7 @@ impl HeaderValidator for NoopConsensus { impl Consensus for NoopConsensus { type Error = ConsensusError; + /// Validates body against header (no-op implementation). fn validate_body_against_header( &self, _body: &B::Body, @@ -40,12 +66,14 @@ impl Consensus for NoopConsensus { Ok(()) } + /// Validates block before execution (no-op implementation). 
fn validate_block_pre_execution(&self, _block: &SealedBlock) -> Result<(), Self::Error> { Ok(()) } } impl FullConsensus for NoopConsensus { + /// Validates block after execution (no-op implementation). fn validate_block_post_execution( &self, _block: &RecoveredBlock, From 81b1949c3c6eba2c839f6f5982af0b27ccf09a19 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Thu, 23 Oct 2025 15:06:04 +0100 Subject: [PATCH 17/23] fix(cli): prune CLI argument names (#19215) --- crates/node/core/src/args/pruning.rs | 33 +++++++++++++++----------- docs/vocs/docs/pages/cli/reth/node.mdx | 26 ++++++++++---------- 2 files changed, 32 insertions(+), 27 deletions(-) diff --git a/crates/node/core/src/args/pruning.rs b/crates/node/core/src/args/pruning.rs index b5c782e62bf..2ff67446bbf 100644 --- a/crates/node/core/src/args/pruning.rs +++ b/crates/node/core/src/args/pruning.rs @@ -18,33 +18,33 @@ pub struct PruningArgs { pub full: bool, /// Minimum pruning interval measured in blocks. - #[arg(long, value_parser = RangedU64ValueParser::::new().range(1..),)] + #[arg(long = "prune.block-interval", alias = "block-interval", value_parser = RangedU64ValueParser::::new().range(1..))] pub block_interval: Option, // Sender Recovery /// Prunes all sender recovery data. - #[arg(long = "prune.senderrecovery.full", conflicts_with_all = &["sender_recovery_distance", "sender_recovery_before"])] + #[arg(long = "prune.sender-recovery.full", alias = "prune.senderrecovery.full", conflicts_with_all = &["sender_recovery_distance", "sender_recovery_before"])] pub sender_recovery_full: bool, /// Prune sender recovery data before the `head-N` block number. In other words, keep last N + /// 1 blocks. 
- #[arg(long = "prune.senderrecovery.distance", value_name = "BLOCKS", conflicts_with_all = &["sender_recovery_full", "sender_recovery_before"])] + #[arg(long = "prune.sender-recovery.distance", alias = "prune.senderrecovery.distance", value_name = "BLOCKS", conflicts_with_all = &["sender_recovery_full", "sender_recovery_before"])] pub sender_recovery_distance: Option, /// Prune sender recovery data before the specified block number. The specified block number is /// not pruned. - #[arg(long = "prune.senderrecovery.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["sender_recovery_full", "sender_recovery_distance"])] + #[arg(long = "prune.sender-recovery.before", alias = "prune.senderrecovery.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["sender_recovery_full", "sender_recovery_distance"])] pub sender_recovery_before: Option, // Transaction Lookup /// Prunes all transaction lookup data. - #[arg(long = "prune.transactionlookup.full", conflicts_with_all = &["transaction_lookup_distance", "transaction_lookup_before"])] + #[arg(long = "prune.transaction-lookup.full", alias = "prune.transactionlookup.full", conflicts_with_all = &["transaction_lookup_distance", "transaction_lookup_before"])] pub transaction_lookup_full: bool, /// Prune transaction lookup data before the `head-N` block number. In other words, keep last N /// + 1 blocks. - #[arg(long = "prune.transactionlookup.distance", value_name = "BLOCKS", conflicts_with_all = &["transaction_lookup_full", "transaction_lookup_before"])] + #[arg(long = "prune.transaction-lookup.distance", alias = "prune.transactionlookup.distance", value_name = "BLOCKS", conflicts_with_all = &["transaction_lookup_full", "transaction_lookup_before"])] pub transaction_lookup_distance: Option, /// Prune transaction lookup data before the specified block number. The specified block number /// is not pruned. 
- #[arg(long = "prune.transactionlookup.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["transaction_lookup_full", "transaction_lookup_distance"])] + #[arg(long = "prune.transaction-lookup.before", alias = "prune.transactionlookup.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["transaction_lookup_full", "transaction_lookup_distance"])] pub transaction_lookup_before: Option, // Receipts @@ -61,33 +61,38 @@ pub struct PruningArgs { #[arg(long = "prune.receipts.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["receipts_full", "receipts_pre_merge", "receipts_distance"])] pub receipts_before: Option, /// Receipts Log Filter - #[arg(long = "prune.receiptslogfilter", value_name = "FILTER_CONFIG", hide = true)] + #[arg( + long = "prune.receipts-log-filter", + alias = "prune.receiptslogfilter", + value_name = "FILTER_CONFIG", + hide = true + )] #[deprecated] pub receipts_log_filter: Option, // Account History /// Prunes all account history. - #[arg(long = "prune.accounthistory.full", conflicts_with_all = &["account_history_distance", "account_history_before"])] + #[arg(long = "prune.account-history.full", alias = "prune.accounthistory.full", conflicts_with_all = &["account_history_distance", "account_history_before"])] pub account_history_full: bool, /// Prune account before the `head-N` block number. In other words, keep last N + 1 blocks. - #[arg(long = "prune.accounthistory.distance", value_name = "BLOCKS", conflicts_with_all = &["account_history_full", "account_history_before"])] + #[arg(long = "prune.account-history.distance", alias = "prune.accounthistory.distance", value_name = "BLOCKS", conflicts_with_all = &["account_history_full", "account_history_before"])] pub account_history_distance: Option, /// Prune account history before the specified block number. The specified block number is not /// pruned. 
- #[arg(long = "prune.accounthistory.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["account_history_full", "account_history_distance"])] + #[arg(long = "prune.account-history.before", alias = "prune.accounthistory.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["account_history_full", "account_history_distance"])] pub account_history_before: Option, // Storage History /// Prunes all storage history data. - #[arg(long = "prune.storagehistory.full", conflicts_with_all = &["storage_history_distance", "storage_history_before"])] + #[arg(long = "prune.storage-history.full", alias = "prune.storagehistory.full", conflicts_with_all = &["storage_history_distance", "storage_history_before"])] pub storage_history_full: bool, /// Prune storage history before the `head-N` block number. In other words, keep last N + 1 /// blocks. - #[arg(long = "prune.storagehistory.distance", value_name = "BLOCKS", conflicts_with_all = &["storage_history_full", "storage_history_before"])] + #[arg(long = "prune.storage-history.distance", alias = "prune.storagehistory.distance", value_name = "BLOCKS", conflicts_with_all = &["storage_history_full", "storage_history_before"])] pub storage_history_distance: Option, /// Prune storage history before the specified block number. The specified block number is not /// pruned. - #[arg(long = "prune.storagehistory.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["storage_history_full", "storage_history_distance"])] + #[arg(long = "prune.storage-history.before", alias = "prune.storagehistory.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["storage_history_full", "storage_history_distance"])] pub storage_history_before: Option, // Bodies diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index 3fc6988dc69..7b70afe44c9 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -743,25 +743,25 @@ Pruning: --full Run full node. 
Only the most recent [`MINIMUM_PRUNING_DISTANCE`] block states are stored - --block-interval + --prune.block-interval Minimum pruning interval measured in blocks - --prune.senderrecovery.full + --prune.sender-recovery.full Prunes all sender recovery data - --prune.senderrecovery.distance + --prune.sender-recovery.distance Prune sender recovery data before the `head-N` block number. In other words, keep last N + 1 blocks - --prune.senderrecovery.before + --prune.sender-recovery.before Prune sender recovery data before the specified block number. The specified block number is not pruned - --prune.transactionlookup.full + --prune.transaction-lookup.full Prunes all transaction lookup data - --prune.transactionlookup.distance + --prune.transaction-lookup.distance Prune transaction lookup data before the `head-N` block number. In other words, keep last N + 1 blocks - --prune.transactionlookup.before + --prune.transaction-lookup.before Prune transaction lookup data before the specified block number. The specified block number is not pruned --prune.receipts.full @@ -776,22 +776,22 @@ Pruning: --prune.receipts.before Prune receipts before the specified block number. The specified block number is not pruned - --prune.accounthistory.full + --prune.account-history.full Prunes all account history - --prune.accounthistory.distance + --prune.account-history.distance Prune account before the `head-N` block number. In other words, keep last N + 1 blocks - --prune.accounthistory.before + --prune.account-history.before Prune account history before the specified block number. The specified block number is not pruned - --prune.storagehistory.full + --prune.storage-history.full Prunes all storage history data - --prune.storagehistory.distance + --prune.storage-history.distance Prune storage history before the `head-N` block number. 
In other words, keep last N + 1 blocks - --prune.storagehistory.before + --prune.storage-history.before Prune storage history before the specified block number. The specified block number is not pruned --prune.bodies.pre-merge From 7b7f563987d76a21aa4980738aa7fc9cb4561e98 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 23 Oct 2025 10:38:32 -0400 Subject: [PATCH 18/23] fix(engine): shrink tries after clearing (#19159) --- .../configured_sparse_trie.rs | 14 +++++ .../tree/src/tree/payload_processor/mod.rs | 35 +++++++++++- crates/trie/sparse-parallel/src/lower.rs | 22 ++++++++ crates/trie/sparse-parallel/src/trie.rs | 55 +++++++++++++++++++ crates/trie/sparse/src/state.rs | 51 +++++++++++++++++ crates/trie/sparse/src/traits.rs | 8 +++ crates/trie/sparse/src/trie.rs | 32 +++++++++++ 7 files changed, 215 insertions(+), 2 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs index 90e8928dba2..9e8f787823a 100644 --- a/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs +++ b/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs @@ -186,4 +186,18 @@ impl SparseTrieInterface for ConfiguredSparseTrie { Self::Parallel(trie) => trie.value_capacity(), } } + + fn shrink_nodes_to(&mut self, size: usize) { + match self { + Self::Serial(trie) => trie.shrink_nodes_to(size), + Self::Parallel(trie) => trie.shrink_nodes_to(size), + } + } + + fn shrink_values_to(&mut self, size: usize) { + match self { + Self::Serial(trie) => trie.shrink_values_to(size), + Self::Parallel(trie) => trie.shrink_values_to(size), + } + } } diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index 8ab186dea5b..bf3d7268ea5 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ 
b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -66,6 +66,29 @@ use configured_sparse_trie::ConfiguredSparseTrie; pub const PARALLEL_SPARSE_TRIE_PARALLELISM_THRESHOLDS: ParallelismThresholds = ParallelismThresholds { min_revealed_nodes: 100, min_updated_nodes: 100 }; +/// Default node capacity for shrinking the sparse trie. This is used to limit the number of trie +/// nodes in allocated sparse tries. +/// +/// Node maps have a key of `Nibbles` and value of `SparseNode`. +/// The `size_of::` is 40, and `size_of::` is 80. +/// +/// If we have 1 million entries of 120 bytes each, this conservative estimate comes out at around +/// 120MB. +pub const SPARSE_TRIE_MAX_NODES_SHRINK_CAPACITY: usize = 1_000_000; + +/// Default value capacity for shrinking the sparse trie. This is used to limit the number of values +/// in allocated sparse tries. +/// +/// There are storage and account values, the largest of the two being account values, which are +/// essentially `TrieAccount`s. +/// +/// Account value maps have a key of `Nibbles` and value of `TrieAccount`. +/// The `size_of::` is 40, and `size_of::` is 104. +/// +/// If we have 1 million entries of 144 bytes each, this conservative estimate comes out at around +/// 144MB. +pub const SPARSE_TRIE_MAX_VALUES_SHRINK_CAPACITY: usize = 1_000_000; + /// Entrypoint for executing the payload. #[derive(Debug)] pub struct PayloadProcessor @@ -439,11 +462,19 @@ where // Send state root computation result let _ = state_root_tx.send(result); - // Clear the SparseStateTrie and replace it back into the mutex _after_ sending + // Clear the SparseStateTrie, shrink, and replace it back into the mutex _after_ sending // results to the next step, so that time spent clearing doesn't block the step after // this one. 
let _enter = debug_span!(target: "engine::tree::payload_processor", "clear").entered(); - cleared_sparse_trie.lock().replace(ClearedSparseStateTrie::from_state_trie(trie)); + let mut cleared_trie = ClearedSparseStateTrie::from_state_trie(trie); + + // Shrink the sparse trie so that we don't have ever increasing memory. + cleared_trie.shrink_to( + SPARSE_TRIE_MAX_NODES_SHRINK_CAPACITY, + SPARSE_TRIE_MAX_VALUES_SHRINK_CAPACITY, + ); + + cleared_sparse_trie.lock().replace(cleared_trie); }); } } diff --git a/crates/trie/sparse-parallel/src/lower.rs b/crates/trie/sparse-parallel/src/lower.rs index b5454dd3970..bc8ae006074 100644 --- a/crates/trie/sparse-parallel/src/lower.rs +++ b/crates/trie/sparse-parallel/src/lower.rs @@ -122,4 +122,26 @@ impl LowerSparseSubtrie { Self::Blind(None) => 0, } } + + /// Shrinks the capacity of the subtrie's node storage. + /// Works for both revealed and blind tries with allocated storage. + pub(crate) fn shrink_nodes_to(&mut self, size: usize) { + match self { + Self::Revealed(trie) | Self::Blind(Some(trie)) => { + trie.shrink_nodes_to(size); + } + Self::Blind(None) => {} + } + } + + /// Shrinks the capacity of the subtrie's value storage. + /// Works for both revealed and blind tries with allocated storage. 
+ pub(crate) fn shrink_values_to(&mut self, size: usize) { + match self { + Self::Revealed(trie) | Self::Blind(Some(trie)) => { + trie.shrink_values_to(size); + } + Self::Blind(None) => {} + } + } } diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index 5e5a838f414..34c1ff2a963 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -883,6 +883,42 @@ impl SparseTrieInterface for ParallelSparseTrie { self.upper_subtrie.value_capacity() + self.lower_subtries.iter().map(|trie| trie.value_capacity()).sum::() } + + fn shrink_nodes_to(&mut self, size: usize) { + // Distribute the capacity across upper and lower subtries + // + // Always include upper subtrie, plus any lower subtries + let total_subtries = 1 + NUM_LOWER_SUBTRIES; + let size_per_subtrie = size / total_subtries; + + // Shrink the upper subtrie + self.upper_subtrie.shrink_nodes_to(size_per_subtrie); + + // Shrink lower subtries (works for both revealed and blind with allocation) + for subtrie in &mut self.lower_subtries { + subtrie.shrink_nodes_to(size_per_subtrie); + } + + // shrink masks maps + self.branch_node_hash_masks.shrink_to(size); + self.branch_node_tree_masks.shrink_to(size); + } + + fn shrink_values_to(&mut self, size: usize) { + // Distribute the capacity across upper and lower subtries + // + // Always include upper subtrie, plus any lower subtries + let total_subtries = 1 + NUM_LOWER_SUBTRIES; + let size_per_subtrie = size / total_subtries; + + // Shrink the upper subtrie + self.upper_subtrie.shrink_values_to(size_per_subtrie); + + // Shrink lower subtries (works for both revealed and blind with allocation) + for subtrie in &mut self.lower_subtries { + subtrie.shrink_values_to(size_per_subtrie); + } + } } impl ParallelSparseTrie { @@ -2111,6 +2147,16 @@ impl SparseSubtrie { pub(crate) fn value_capacity(&self) -> usize { self.inner.value_capacity() } + + /// Shrinks the capacity of the subtrie's node 
storage. + pub(crate) fn shrink_nodes_to(&mut self, size: usize) { + self.nodes.shrink_to(size); + } + + /// Shrinks the capacity of the subtrie's value storage. + pub(crate) fn shrink_values_to(&mut self, size: usize) { + self.inner.values.shrink_to(size); + } } /// Helper type for [`SparseSubtrie`] to mutably access only a subset of fields from the original @@ -2571,10 +2617,19 @@ impl SparseSubtrieBuffers { /// Clears all buffers. fn clear(&mut self) { self.path_stack.clear(); + self.path_stack.shrink_to_fit(); + self.rlp_node_stack.clear(); + self.rlp_node_stack.shrink_to_fit(); + self.branch_child_buf.clear(); + self.branch_child_buf.shrink_to_fit(); + self.branch_value_stack_buf.clear(); + self.branch_value_stack_buf.shrink_to_fit(); + self.rlp_buf.clear(); + self.rlp_buf.shrink_to_fit(); } } diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index aef552da3dd..a202ebc8b2b 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -43,6 +43,32 @@ where Self(trie) } + /// Shrink the cleared sparse trie's capacity to the given node and value size. + /// This helps reduce memory usage when the trie has excess capacity. + /// The capacity is distributed equally across the account trie and all storage tries. 
+ pub fn shrink_to(&mut self, node_size: usize, value_size: usize) { + // Count total number of storage tries (active + cleared + default) + let storage_tries_count = self.0.storage.tries.len() + self.0.storage.cleared_tries.len(); + + // Total tries = 1 account trie + all storage tries + let total_tries = 1 + storage_tries_count; + + // Distribute capacity equally among all tries + let node_size_per_trie = node_size / total_tries; + let value_size_per_trie = value_size / total_tries; + + // Shrink the account trie + self.0.state.shrink_nodes_to(node_size_per_trie); + self.0.state.shrink_values_to(value_size_per_trie); + + // Give storage tries the remaining capacity after account trie allocation + let storage_node_size = node_size.saturating_sub(node_size_per_trie); + let storage_value_size = value_size.saturating_sub(value_size_per_trie); + + // Shrink all storage tries (they will redistribute internally) + self.0.storage.shrink_to(storage_node_size, storage_value_size); + } + /// Returns the cleared [`SparseStateTrie`], consuming this instance. pub fn into_inner(self) -> SparseStateTrie { self.0 @@ -860,6 +886,31 @@ impl StorageTries { set })); } + + /// Shrinks the capacity of all storage tries (active, cleared, and default) to the given sizes. + /// The capacity is distributed equally among all tries that have allocations. 
+ fn shrink_to(&mut self, node_size: usize, value_size: usize) { + // Count total number of tries with capacity (active + cleared + default) + let active_count = self.tries.len(); + let cleared_count = self.cleared_tries.len(); + let total_tries = 1 + active_count + cleared_count; + + // Distribute capacity equally among all tries + let node_size_per_trie = node_size / total_tries; + let value_size_per_trie = value_size / total_tries; + + // Shrink active storage tries + for trie in self.tries.values_mut() { + trie.shrink_nodes_to(node_size_per_trie); + trie.shrink_values_to(value_size_per_trie); + } + + // Shrink cleared storage tries + for trie in &mut self.cleared_tries { + trie.shrink_nodes_to(node_size_per_trie); + trie.shrink_values_to(value_size_per_trie); + } + } } impl StorageTries { diff --git a/crates/trie/sparse/src/traits.rs b/crates/trie/sparse/src/traits.rs index 8fdbb78d876..5b7b6193f96 100644 --- a/crates/trie/sparse/src/traits.rs +++ b/crates/trie/sparse/src/traits.rs @@ -228,6 +228,14 @@ pub trait SparseTrieInterface: Sized + Debug + Send + Sync { /// This returns the capacity of any inner data structures which store leaf values. fn value_capacity(&self) -> usize; + + /// Shrink the capacity of the sparse trie's node storage to the given size. + /// This will reduce memory usage if the current capacity is higher than the given size. + fn shrink_nodes_to(&mut self, size: usize); + + /// Shrink the capacity of the sparse trie's value storage to the given size. + /// This will reduce memory usage if the current capacity is higher than the given size. + fn shrink_values_to(&mut self, size: usize); } /// Struct for passing around branch node mask information. 
diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 737da842254..8500ea400b5 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -275,6 +275,28 @@ impl SparseTrie { _ => 0, } } + + /// Shrinks the capacity of the sparse trie's node storage. + /// Works for both revealed and blind tries with allocated storage. + pub fn shrink_nodes_to(&mut self, size: usize) { + match self { + Self::Blind(Some(trie)) | Self::Revealed(trie) => { + trie.shrink_nodes_to(size); + } + _ => {} + } + } + + /// Shrinks the capacity of the sparse trie's value storage. + /// Works for both revealed and blind tries with allocated storage. + pub fn shrink_values_to(&mut self, size: usize) { + match self { + Self::Blind(Some(trie)) | Self::Revealed(trie) => { + trie.shrink_values_to(size); + } + _ => {} + } + } } /// The representation of revealed sparse trie. @@ -1088,6 +1110,16 @@ impl SparseTrieInterface for SerialSparseTrie { fn value_capacity(&self) -> usize { self.values.capacity() } + + fn shrink_nodes_to(&mut self, size: usize) { + self.nodes.shrink_to(size); + self.branch_node_tree_masks.shrink_to(size); + self.branch_node_hash_masks.shrink_to(size); + } + + fn shrink_values_to(&mut self, size: usize) { + self.values.shrink_to(size); + } } impl SerialSparseTrie { From 6739914ce7c9c5fe0947a6b7ff5a300a917afa6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com> Date: Thu, 23 Oct 2025 16:44:24 +0200 Subject: [PATCH 19/23] feat(otlp-tracing): enable to export traces with grpc export with `tracing-otlp` and `tracing-otlp-protocol` arg (#18985) --- Cargo.lock | 26 +++++++ crates/ethereum/cli/Cargo.toml | 2 + crates/ethereum/cli/src/app.rs | 51 ++++++++++--- crates/node/core/Cargo.toml | 3 +- crates/node/core/src/args/trace.rs | 58 +++++++++------ crates/optimism/cli/Cargo.toml | 3 + crates/optimism/cli/src/app.rs | 50 ++++++++++--- crates/tracing-otlp/Cargo.toml | 3 +- 
crates/tracing-otlp/src/lib.rs | 71 +++++++++++-------- crates/tracing/src/layers.rs | 12 ++-- docs/vocs/docs/pages/cli/reth.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/config.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/db.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/db/checksum.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/db/clear.mdx | 18 ++++- .../docs/pages/cli/reth/db/clear/mdbx.mdx | 18 ++++- .../pages/cli/reth/db/clear/static-file.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/db/diff.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/db/drop.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/db/get.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx | 18 ++++- .../pages/cli/reth/db/get/static-file.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/db/list.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/db/path.mdx | 18 ++++- .../docs/pages/cli/reth/db/repair-trie.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/db/stats.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/db/version.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/download.mdx | 18 ++++- .../vocs/docs/pages/cli/reth/dump-genesis.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/export-era.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/import-era.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/import.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/init-state.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/init.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/node.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/p2p.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/p2p/body.mdx | 18 ++++- .../vocs/docs/pages/cli/reth/p2p/bootnode.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/p2p/header.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx | 18 ++++- .../docs/pages/cli/reth/p2p/rlpx/ping.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/prune.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/re-execute.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/stage.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/stage/drop.mdx | 18 ++++- 
docs/vocs/docs/pages/cli/reth/stage/dump.mdx | 18 ++++- .../cli/reth/stage/dump/account-hashing.mdx | 18 ++++- .../pages/cli/reth/stage/dump/execution.mdx | 18 ++++- .../docs/pages/cli/reth/stage/dump/merkle.mdx | 18 ++++- .../cli/reth/stage/dump/storage-hashing.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/stage/run.mdx | 18 ++++- .../vocs/docs/pages/cli/reth/stage/unwind.mdx | 18 ++++- .../cli/reth/stage/unwind/num-blocks.mdx | 18 ++++- .../pages/cli/reth/stage/unwind/to-block.mdx | 18 ++++- 54 files changed, 910 insertions(+), 161 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6839523354a..dbfc2f99a6f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4478,6 +4478,19 @@ dependencies = [ "webpki-roots 1.0.3", ] +[[package]] +name = "hyper-timeout" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" +dependencies = [ + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", +] + [[package]] name = "hyper-util" version = "0.1.17" @@ -6189,6 +6202,8 @@ dependencies = [ "prost", "reqwest", "thiserror 2.0.17", + "tokio", + "tonic", "tracing", ] @@ -8304,8 +8319,10 @@ dependencies = [ "reth-node-metrics", "reth-rpc-server-types", "reth-tracing", + "reth-tracing-otlp", "tempfile", "tracing", + "url", ] [[package]] @@ -9001,6 +9018,7 @@ dependencies = [ "reth-storage-api", "reth-storage-errors", "reth-tracing", + "reth-tracing-otlp", "reth-transaction-pool", "secp256k1 0.30.0", "serde", @@ -9255,11 +9273,13 @@ dependencies = [ "reth-static-file", "reth-static-file-types", "reth-tracing", + "reth-tracing-otlp", "serde", "tempfile", "tokio", "tokio-util", "tracing", + "url", ] [[package]] @@ -10580,6 +10600,7 @@ dependencies = [ name = "reth-tracing-otlp" version = "1.8.2" dependencies = [ + "clap", "eyre", "opentelemetry", "opentelemetry-otlp", @@ -12594,10 +12615,15 @@ dependencies = [ "http", "http-body", "http-body-util", + "hyper", + 
"hyper-timeout", + "hyper-util", "percent-encoding", "pin-project", "sync_wrapper", + "tokio", "tokio-stream", + "tower", "tower-layer", "tower-service", "tracing", diff --git a/crates/ethereum/cli/Cargo.toml b/crates/ethereum/cli/Cargo.toml index e232ea0cdb1..5dbb8bf4cd3 100644 --- a/crates/ethereum/cli/Cargo.toml +++ b/crates/ethereum/cli/Cargo.toml @@ -23,11 +23,13 @@ reth-node-ethereum.workspace = true reth-node-metrics.workspace = true reth-rpc-server-types.workspace = true reth-tracing.workspace = true +reth-tracing-otlp.workspace = true reth-node-api.workspace = true # misc clap.workspace = true eyre.workspace = true +url.workspace = true tracing.workspace = true [dev-dependencies] diff --git a/crates/ethereum/cli/src/app.rs b/crates/ethereum/cli/src/app.rs index ab3682be6dc..b947d6df1db 100644 --- a/crates/ethereum/cli/src/app.rs +++ b/crates/ethereum/cli/src/app.rs @@ -14,8 +14,10 @@ use reth_node_ethereum::{consensus::EthBeaconConsensus, EthEvmConfig, EthereumNo use reth_node_metrics::recorder::install_prometheus_recorder; use reth_rpc_server_types::RpcModuleValidator; use reth_tracing::{FileWorkerGuard, Layers}; +use reth_tracing_otlp::OtlpProtocol; use std::{fmt, sync::Arc}; use tracing::info; +use url::Url; /// A wrapper around a parsed CLI that handles command execution. #[derive(Debug)] @@ -96,7 +98,8 @@ where self.cli.logs.log_file_directory.join(chain_spec.chain().to_string()); } - self.init_tracing()?; + self.init_tracing(&runner)?; + // Install the prometheus recorder to be sure to record all metrics let _ = install_prometheus_recorder(); @@ -106,18 +109,19 @@ where /// Initializes tracing with the configured options. /// /// If file logging is enabled, this function stores guard to the struct. - pub fn init_tracing(&mut self) -> Result<()> { + /// For gRPC OTLP, it requires tokio runtime context. 
+ pub fn init_tracing(&mut self, runner: &CliRunner) -> Result<()> { if self.guard.is_none() { let mut layers = self.layers.take().unwrap_or_default(); #[cfg(feature = "otlp")] - if let Some(output_type) = &self.cli.traces.otlp { - info!(target: "reth::cli", "Starting OTLP tracing export to {:?}", output_type); - layers.with_span_layer( - "reth".to_string(), - output_type.clone(), - self.cli.traces.otlp_filter.clone(), - )?; + { + self.cli.traces.validate()?; + + if let Some(endpoint) = &self.cli.traces.otlp { + info!(target: "reth::cli", "Starting OTLP tracing export to {:?}", endpoint); + self.init_otlp_export(&mut layers, endpoint, runner)?; + } } self.guard = self.cli.logs.init_tracing_with_layers(layers)?; @@ -125,6 +129,35 @@ where } Ok(()) } + + /// Initialize OTLP tracing export based on protocol type. + /// + /// For gRPC, `block_on` is required because tonic's channel initialization needs + /// a tokio runtime context, even though `with_span_layer` itself is not async. + #[cfg(feature = "otlp")] + fn init_otlp_export( + &self, + layers: &mut Layers, + endpoint: &Url, + runner: &CliRunner, + ) -> Result<()> { + let endpoint = endpoint.clone(); + let protocol = self.cli.traces.protocol; + let filter_level = self.cli.traces.otlp_filter.clone(); + + match protocol { + OtlpProtocol::Grpc => { + runner.block_on(async { + layers.with_span_layer("reth".to_string(), endpoint, filter_level, protocol) + })?; + } + OtlpProtocol::Http => { + layers.with_span_layer("reth".to_string(), endpoint, filter_level, protocol)?; + } + } + + Ok(()) + } } /// Run CLI commands with the provided runner, components and launcher. 
diff --git a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index 1a4f85b6198..b1a472bd9fd 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -58,8 +58,9 @@ url.workspace = true dirs-next.workspace = true shellexpand.workspace = true -# tracing +# obs tracing.workspace = true +reth-tracing-otlp.workspace = true # crypto secp256k1 = { workspace = true, features = ["global-context", "std", "recovery"] } diff --git a/crates/node/core/src/args/trace.rs b/crates/node/core/src/args/trace.rs index 45bc9c9029c..5b5e21502d1 100644 --- a/crates/node/core/src/args/trace.rs +++ b/crates/node/core/src/args/trace.rs @@ -1,17 +1,19 @@ //! Opentelemetry tracing configuration through CLI args. use clap::Parser; -use eyre::{ensure, WrapErr}; +use eyre::WrapErr; use reth_tracing::tracing_subscriber::EnvFilter; +use reth_tracing_otlp::OtlpProtocol; use url::Url; /// CLI arguments for configuring `Opentelemetry` trace and span export. #[derive(Debug, Clone, Parser)] pub struct TraceArgs { - /// Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently - /// only http exporting is supported. + /// Enable `Opentelemetry` tracing export to an OTLP endpoint. /// - /// If no value provided, defaults to `http://localhost:4318/v1/traces`. + /// If no value provided, defaults based on protocol: + /// - HTTP: `http://localhost:4318/v1/traces` + /// - gRPC: `http://localhost:4317` /// /// Example: --tracing-otlp=http://collector:4318/v1/traces #[arg( @@ -28,6 +30,22 @@ pub struct TraceArgs { )] pub otlp: Option, + /// OTLP transport protocol to use for exporting traces. + /// + /// - `http`: expects endpoint path to end with `/v1/traces` + /// - `grpc`: expects endpoint without a path + /// + /// Defaults to HTTP if not specified. 
+ #[arg( + long = "tracing-otlp-protocol", + env = "OTEL_EXPORTER_OTLP_PROTOCOL", + global = true, + value_name = "PROTOCOL", + default_value = "http", + help_heading = "Tracing" + )] + pub protocol: OtlpProtocol, + /// Set a filter directive for the OTLP tracer. This controls the verbosity /// of spans and events sent to the OTLP endpoint. It follows the same /// syntax as the `RUST_LOG` environment variable. @@ -47,25 +65,25 @@ pub struct TraceArgs { impl Default for TraceArgs { fn default() -> Self { - Self { otlp: None, otlp_filter: EnvFilter::from_default_env() } + Self { + otlp: None, + protocol: OtlpProtocol::Http, + otlp_filter: EnvFilter::from_default_env(), + } } } -// Parses and validates an OTLP endpoint url. -fn parse_otlp_endpoint(arg: &str) -> eyre::Result { - let mut url = Url::parse(arg).wrap_err("Invalid URL for OTLP trace output")?; - - // If the path is empty, we set the path. - if url.path() == "/" { - url.set_path("/v1/traces") +impl TraceArgs { + /// Validate the configuration + pub fn validate(&mut self) -> eyre::Result<()> { + if let Some(url) = &mut self.otlp { + self.protocol.validate_endpoint(url)?; + } + Ok(()) } +} - // OTLP url must end with `/v1/traces` per the OTLP specification. - ensure!( - url.path().ends_with("/v1/traces"), - "OTLP trace endpoint must end with /v1/traces, got path: {}", - url.path() - ); - - Ok(url) +// Parses an OTLP endpoint url. 
+fn parse_otlp_endpoint(arg: &str) -> eyre::Result { + Url::parse(arg).wrap_err("Invalid URL for OTLP trace output") } diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index 6ed24ca5823..eb320045337 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -44,6 +44,7 @@ reth-optimism-evm.workspace = true reth-cli-runner.workspace = true reth-node-builder = { workspace = true, features = ["op"] } reth-tracing.workspace = true +reth-tracing-otlp.workspace = true # eth alloy-eips.workspace = true @@ -55,6 +56,7 @@ alloy-rlp.workspace = true futures-util.workspace = true derive_more.workspace = true serde.workspace = true +url.workspace = true clap = { workspace = true, features = ["derive", "env"] } tokio = { workspace = true, features = ["sync", "macros", "time", "rt-multi-thread"] } @@ -105,4 +107,5 @@ serde = [ "reth-optimism-primitives/serde", "reth-primitives-traits/serde", "reth-optimism-chainspec/serde", + "url/serde", ] diff --git a/crates/optimism/cli/src/app.rs b/crates/optimism/cli/src/app.rs index 621d16c7e13..8567c2b7e5a 100644 --- a/crates/optimism/cli/src/app.rs +++ b/crates/optimism/cli/src/app.rs @@ -9,8 +9,10 @@ use reth_optimism_consensus::OpBeaconConsensus; use reth_optimism_node::{OpExecutorProvider, OpNode}; use reth_rpc_server_types::RpcModuleValidator; use reth_tracing::{FileWorkerGuard, Layers}; +use reth_tracing_otlp::OtlpProtocol; use std::{fmt, sync::Arc}; use tracing::info; +use url::Url; /// A wrapper around a parsed CLI that handles command execution. #[derive(Debug)] @@ -63,7 +65,8 @@ where self.cli.logs.log_file_directory.join(chain_spec.chain.to_string()); } - self.init_tracing()?; + self.init_tracing(&runner)?; + // Install the prometheus recorder to be sure to record all metrics let _ = install_prometheus_recorder(); @@ -114,18 +117,18 @@ where /// Initializes tracing with the configured options. /// /// If file logging is enabled, this function stores guard to the struct. 
- pub fn init_tracing(&mut self) -> Result<()> { + /// For gRPC OTLP, it requires tokio runtime context. + pub fn init_tracing(&mut self, runner: &CliRunner) -> Result<()> { if self.guard.is_none() { let mut layers = self.layers.take().unwrap_or_default(); #[cfg(feature = "otlp")] - if let Some(output_type) = &self.cli.traces.otlp { - info!(target: "reth::cli", "Starting OTLP tracing export to {:?}", output_type); - layers.with_span_layer( - "reth".to_string(), - output_type.clone(), - self.cli.traces.otlp_filter.clone(), - )?; + { + self.cli.traces.validate()?; + if let Some(endpoint) = &self.cli.traces.otlp { + info!(target: "reth::cli", "Starting OTLP tracing export to {:?}", endpoint); + self.init_otlp_export(&mut layers, endpoint, runner)?; + } } self.guard = self.cli.logs.init_tracing_with_layers(layers)?; @@ -133,4 +136,33 @@ where } Ok(()) } + + /// Initialize OTLP tracing export based on protocol type. + /// + /// For gRPC, `block_on` is required because tonic's channel initialization needs + /// a tokio runtime context, even though `with_span_layer` itself is not async. 
+ #[cfg(feature = "otlp")] + fn init_otlp_export( + &self, + layers: &mut Layers, + endpoint: &Url, + runner: &CliRunner, + ) -> Result<()> { + let endpoint = endpoint.clone(); + let protocol = self.cli.traces.protocol; + let level_filter = self.cli.traces.otlp_filter.clone(); + + match protocol { + OtlpProtocol::Grpc => { + runner.block_on(async { + layers.with_span_layer("reth".to_string(), endpoint, level_filter, protocol) + })?; + } + OtlpProtocol::Http => { + layers.with_span_layer("reth".to_string(), endpoint, level_filter, protocol)?; + } + } + + Ok(()) + } } diff --git a/crates/tracing-otlp/Cargo.toml b/crates/tracing-otlp/Cargo.toml index 60cee0aa229..5b01095d4ff 100644 --- a/crates/tracing-otlp/Cargo.toml +++ b/crates/tracing-otlp/Cargo.toml @@ -12,13 +12,14 @@ exclude.workspace = true # obs opentelemetry_sdk = { workspace = true, optional = true } opentelemetry = { workspace = true, optional = true } -opentelemetry-otlp = { workspace = true, optional = true } +opentelemetry-otlp = { workspace = true, optional = true, features = ["grpc-tonic"] } opentelemetry-semantic-conventions = { workspace = true, optional = true } tracing-opentelemetry = { workspace = true, optional = true } tracing-subscriber.workspace = true tracing.workspace = true # misc +clap = { workspace = true, features = ["derive"] } eyre.workspace = true url.workspace = true diff --git a/crates/tracing-otlp/src/lib.rs b/crates/tracing-otlp/src/lib.rs index 07415ac2a65..2cfd332a408 100644 --- a/crates/tracing-otlp/src/lib.rs +++ b/crates/tracing-otlp/src/lib.rs @@ -6,7 +6,8 @@ //! applications. It allows for easily capturing and exporting distributed traces to compatible //! backends like Jaeger, Zipkin, or any other OpenTelemetry-compatible tracing system. 
-use eyre::{ensure, WrapErr}; +use clap::ValueEnum; +use eyre::ensure; use opentelemetry::{global, trace::TracerProvider, KeyValue, Value}; use opentelemetry_otlp::{SpanExporter, WithExportConfig}; use opentelemetry_sdk::{ @@ -20,6 +21,10 @@ use tracing_opentelemetry::OpenTelemetryLayer; use tracing_subscriber::registry::LookupSpan; use url::Url; +// Otlp http endpoint is expected to end with this path. +// See also . +const HTTP_TRACE_ENDPOINT: &str = "/v1/traces"; + /// Creates a tracing [`OpenTelemetryLayer`] that exports spans to an OTLP endpoint. /// /// This layer can be added to a [`tracing_subscriber::Registry`] to enable `OpenTelemetry` tracing @@ -27,6 +32,7 @@ use url::Url; pub fn span_layer( service_name: impl Into, endpoint: &Url, + protocol: OtlpProtocol, ) -> eyre::Result> where for<'span> S: Subscriber + LookupSpan<'span>, @@ -35,8 +41,12 @@ where let resource = build_resource(service_name); - let span_exporter = - SpanExporter::builder().with_http().with_endpoint(endpoint.to_string()).build()?; + let span_builder = SpanExporter::builder(); + + let span_exporter = match protocol { + OtlpProtocol::Http => span_builder.with_http().with_endpoint(endpoint.as_str()).build()?, + OtlpProtocol::Grpc => span_builder.with_tonic().with_endpoint(endpoint.as_str()).build()?, + }; let tracer_provider = SdkTracerProvider::builder() .with_resource(resource) @@ -45,7 +55,7 @@ where global::set_tracer_provider(tracer_provider.clone()); - let tracer = tracer_provider.tracer("reth-otlp"); + let tracer = tracer_provider.tracer("reth"); Ok(tracing_opentelemetry::layer().with_tracer(tracer)) } @@ -57,34 +67,37 @@ fn build_resource(service_name: impl Into) -> Resource { .build() } -/// Destination for exported trace spans. -#[derive(Debug, Clone)] -pub enum TraceOutput { - /// Export traces as JSON to stdout. - Stdout, - /// Export traces to an OTLP collector at the specified URL. 
- Otlp(Url), +/// OTLP transport protocol type +#[derive(Debug, Clone, Copy, PartialEq, Eq, ValueEnum)] +pub enum OtlpProtocol { + /// HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + Http, + /// gRPC transport, port 4317 + Grpc, } -impl TraceOutput { - /// Parses the trace output destination from a string. +impl OtlpProtocol { + /// Validate and correct the URL to match protocol requirements. /// - /// Returns `TraceOutput::Stdout` for "stdout", or `TraceOutput::Otlp` for valid OTLP URLs. - /// OTLP URLs must end with `/v1/traces` per the OTLP specification. - pub fn parse(s: &str) -> eyre::Result { - if s == "stdout" { - return Ok(Self::Stdout); + /// For HTTP: Ensures the path ends with `/v1/traces`, appending it if necessary. + /// For gRPC: Ensures the path does NOT include `/v1/traces`. + pub fn validate_endpoint(&self, url: &mut Url) -> eyre::Result<()> { + match self { + Self::Http => { + if !url.path().ends_with(HTTP_TRACE_ENDPOINT) { + let path = url.path().trim_end_matches('/'); + url.set_path(&format!("{}{}", path, HTTP_TRACE_ENDPOINT)); + } + } + Self::Grpc => { + ensure!( + !url.path().ends_with(HTTP_TRACE_ENDPOINT), + "OTLP gRPC endpoint should not include {} path, got: {}", + HTTP_TRACE_ENDPOINT, + url + ); + } } - - let url = Url::parse(s).wrap_err("Invalid URL for trace output")?; - - // OTLP specification requires the `/v1/traces` path for trace endpoints - ensure!( - url.path().ends_with("/v1/traces"), - "OTLP trace endpoint must end with /v1/traces, got path: {}", - url.path() - ); - - Ok(Self::Otlp(url)) + Ok(()) } } diff --git a/crates/tracing/src/layers.rs b/crates/tracing/src/layers.rs index 210c0066308..660d40ae464 100644 --- a/crates/tracing/src/layers.rs +++ b/crates/tracing/src/layers.rs @@ -1,6 +1,4 @@ use crate::formatter::LogFormat; -#[cfg(feature = "otlp")] -use reth_tracing_otlp::span_layer; use rolling_file::{RollingConditionBasic, RollingFileAppender}; use std::{ fmt, @@ -8,6 +6,11 @@ use std::{ }; use 
tracing_appender::non_blocking::WorkerGuard; use tracing_subscriber::{filter::Directive, EnvFilter, Layer, Registry}; +#[cfg(feature = "otlp")] +use { + reth_tracing_otlp::{span_layer, OtlpProtocol}, + url::Url, +}; /// A worker guard returned by the file layer. /// @@ -134,12 +137,13 @@ impl Layers { pub fn with_span_layer( &mut self, service_name: String, - endpoint_exporter: url::Url, + endpoint_exporter: Url, filter: EnvFilter, + otlp_protocol: OtlpProtocol, ) -> eyre::Result<()> { // Create the span provider - let span_layer = span_layer(service_name, &endpoint_exporter) + let span_layer = span_layer(service_name, &endpoint_exporter, otlp_protocol) .map_err(|e| eyre::eyre!("Failed to build OTLP span exporter {}", e))? .with_filter(filter); diff --git a/docs/vocs/docs/pages/cli/reth.mdx b/docs/vocs/docs/pages/cli/reth.mdx index 041d494523c..c35216d6b5c 100644 --- a/docs/vocs/docs/pages/cli/reth.mdx +++ b/docs/vocs/docs/pages/cli/reth.mdx @@ -116,14 +116,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. 
This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/config.mdx b/docs/vocs/docs/pages/cli/reth/config.mdx index 96bdcf7a98c..6b3c9e4b657 100644 --- a/docs/vocs/docs/pages/cli/reth/config.mdx +++ b/docs/vocs/docs/pages/cli/reth/config.mdx @@ -102,14 +102,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db.mdx b/docs/vocs/docs/pages/cli/reth/db.mdx index f2a49420837..a7bda7c3da7 100644 --- a/docs/vocs/docs/pages/cli/reth/db.mdx +++ b/docs/vocs/docs/pages/cli/reth/db.mdx @@ -167,14 +167,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. 
- If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx index c86273aacf4..4b8b8ca2cce 100644 --- a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx @@ -119,14 +119,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. 
+ + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/clear.mdx b/docs/vocs/docs/pages/cli/reth/db/clear.mdx index 88fd92763f8..1548558fe39 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear.mdx @@ -111,14 +111,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. 
diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx index c467fe9d3dd..b48ba180982 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx @@ -110,14 +110,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx index d4b59a05223..9f22178ec4c 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx @@ -113,14 +113,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. 
+ If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/diff.mdx b/docs/vocs/docs/pages/cli/reth/db/diff.mdx index 4bb81ac07c9..fe7dd7d0bae 100644 --- a/docs/vocs/docs/pages/cli/reth/db/diff.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/diff.mdx @@ -146,14 +146,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. 
+ + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/drop.mdx b/docs/vocs/docs/pages/cli/reth/db/drop.mdx index c75a889458b..c778320f2d8 100644 --- a/docs/vocs/docs/pages/cli/reth/db/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/drop.mdx @@ -109,14 +109,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. 
diff --git a/docs/vocs/docs/pages/cli/reth/db/get.mdx b/docs/vocs/docs/pages/cli/reth/db/get.mdx index 8c20c7e311a..dfcfcac1886 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get.mdx @@ -111,14 +111,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx b/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx index 3b8df2f3a4f..981d0c9f9a5 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx @@ -119,14 +119,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. 
+ If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx index 3980903c65d..8e045a4cdf1 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx @@ -119,14 +119,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. 
+ + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/list.mdx b/docs/vocs/docs/pages/cli/reth/db/list.mdx index 16131a95a17..3be1cd183b2 100644 --- a/docs/vocs/docs/pages/cli/reth/db/list.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/list.mdx @@ -152,14 +152,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. 
diff --git a/docs/vocs/docs/pages/cli/reth/db/path.mdx b/docs/vocs/docs/pages/cli/reth/db/path.mdx index 0c09f5be69b..a954093dd5d 100644 --- a/docs/vocs/docs/pages/cli/reth/db/path.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/path.mdx @@ -106,14 +106,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx b/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx index 9c08ff331ed..6436afc2133 100644 --- a/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx @@ -109,14 +109,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. 
+ If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/stats.mdx b/docs/vocs/docs/pages/cli/reth/db/stats.mdx index 47695e1b22a..5bd316847c0 100644 --- a/docs/vocs/docs/pages/cli/reth/db/stats.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/stats.mdx @@ -119,14 +119,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. 
+ + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/version.mdx b/docs/vocs/docs/pages/cli/reth/db/version.mdx index 7611b69946d..c87496d910d 100644 --- a/docs/vocs/docs/pages/cli/reth/db/version.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/version.mdx @@ -106,14 +106,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. 
diff --git a/docs/vocs/docs/pages/cli/reth/download.mdx b/docs/vocs/docs/pages/cli/reth/download.mdx index b18faa93205..f8f1c199de5 100644 --- a/docs/vocs/docs/pages/cli/reth/download.mdx +++ b/docs/vocs/docs/pages/cli/reth/download.mdx @@ -164,14 +164,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx b/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx index bf5b0ac534c..7aeaa8db49a 100644 --- a/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx +++ b/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx @@ -105,14 +105,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. 
+ If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/export-era.mdx b/docs/vocs/docs/pages/cli/reth/export-era.mdx index cd413c12841..da732cda33b 100644 --- a/docs/vocs/docs/pages/cli/reth/export-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/export-era.mdx @@ -170,14 +170,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. 
+ + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/import-era.mdx b/docs/vocs/docs/pages/cli/reth/import-era.mdx index 7d62409a638..77afcd5a6b3 100644 --- a/docs/vocs/docs/pages/cli/reth/import-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/import-era.mdx @@ -165,14 +165,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. 
diff --git a/docs/vocs/docs/pages/cli/reth/import.mdx b/docs/vocs/docs/pages/cli/reth/import.mdx index 8e3e1cdb0a2..405009c6071 100644 --- a/docs/vocs/docs/pages/cli/reth/import.mdx +++ b/docs/vocs/docs/pages/cli/reth/import.mdx @@ -166,14 +166,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/init-state.mdx b/docs/vocs/docs/pages/cli/reth/init-state.mdx index 49c0e098098..2ef6fdbe838 100644 --- a/docs/vocs/docs/pages/cli/reth/init-state.mdx +++ b/docs/vocs/docs/pages/cli/reth/init-state.mdx @@ -186,14 +186,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. 
+ If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/init.mdx b/docs/vocs/docs/pages/cli/reth/init.mdx index ac1c7ff254b..51dc401d567 100644 --- a/docs/vocs/docs/pages/cli/reth/init.mdx +++ b/docs/vocs/docs/pages/cli/reth/init.mdx @@ -154,14 +154,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. 
+ + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index 7b70afe44c9..48b1c75c591 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -993,14 +993,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. 
diff --git a/docs/vocs/docs/pages/cli/reth/p2p.mdx b/docs/vocs/docs/pages/cli/reth/p2p.mdx index b81c00a0382..7b37fdfdaa3 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p.mdx @@ -103,14 +103,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx index fd28a37ebb1..bbe6b375e5b 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx @@ -323,14 +323,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. 
+ If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx index 63baa86d367..324b01daac5 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx @@ -114,14 +114,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. 
+ + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx index f9f94497547..533bd71de2e 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx @@ -323,14 +323,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. 
diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx index 78d6dd8d3ba..a8ac7fbd0df 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx @@ -100,14 +100,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx index 2089c92461e..2d136630298 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx @@ -100,14 +100,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. 
+ If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/prune.mdx b/docs/vocs/docs/pages/cli/reth/prune.mdx index 8f5828e8a67..8dfd3003816 100644 --- a/docs/vocs/docs/pages/cli/reth/prune.mdx +++ b/docs/vocs/docs/pages/cli/reth/prune.mdx @@ -154,14 +154,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. 
+ + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/re-execute.mdx b/docs/vocs/docs/pages/cli/reth/re-execute.mdx index 56a7e3558c4..b7371fa4cf6 100644 --- a/docs/vocs/docs/pages/cli/reth/re-execute.mdx +++ b/docs/vocs/docs/pages/cli/reth/re-execute.mdx @@ -167,14 +167,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. 
diff --git a/docs/vocs/docs/pages/cli/reth/stage.mdx b/docs/vocs/docs/pages/cli/reth/stage.mdx index 822f0f0c2db..006c6c74340 100644 --- a/docs/vocs/docs/pages/cli/reth/stage.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage.mdx @@ -103,14 +103,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx index 037495979a0..19e813bec22 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx @@ -169,14 +169,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. 
+ If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx index 8484379fe36..20cf8660bf1 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx @@ -161,14 +161,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. 
+ + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx index 079804ff088..70fad94ea3a 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx @@ -118,14 +118,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. 
diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx index 7aee318e1ac..bed5d33329a 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx @@ -118,14 +118,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx index 17b2b7c9515..3bada103c87 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx @@ -118,14 +118,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. 
+ If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx index de64aa51c33..723a54e9272 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx @@ -118,14 +118,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. 
+ + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/stage/run.mdx b/docs/vocs/docs/pages/cli/reth/stage/run.mdx index 5407938072f..ae57239c9d3 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/run.mdx @@ -390,14 +390,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. 
diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx index 2d2f94d6801..a7581b22b3f 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx @@ -162,14 +162,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx index a376af84012..b04e1920b75 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx @@ -110,14 +110,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. 
+ If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx index ce62c643600..2c22f8127c1 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx @@ -110,14 +110,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. 
+ + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. From 4adfa286f7447efe7abff0e6f9038d88b5481b25 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Thu, 23 Oct 2025 20:17:26 +0400 Subject: [PATCH 20/23] fix: return hashed peer key as id (#19245) --- crates/rpc/rpc/src/admin.rs | 48 ++++++++++++++++--------------------- 1 file changed, 20 insertions(+), 28 deletions(-) diff --git a/crates/rpc/rpc/src/admin.rs b/crates/rpc/rpc/src/admin.rs index ce548230864..af5e1ae2ef9 100644 --- a/crates/rpc/rpc/src/admin.rs +++ b/crates/rpc/rpc/src/admin.rs @@ -14,6 +14,7 @@ use reth_network_types::PeerKind; use reth_rpc_api::AdminApiServer; use reth_rpc_server_types::ToRpcResult; use reth_transaction_pool::TransactionPool; +use revm_primitives::keccak256; /// `admin` API implementation. 
/// @@ -74,34 +75,25 @@ where let mut infos = Vec::with_capacity(peers.len()); for peer in peers { - if let Ok(pk) = id2pk(peer.remote_id) { - infos.push(PeerInfo { - id: pk.to_string(), - name: peer.client_version.to_string(), - enode: peer.enode, - enr: peer.enr, - caps: peer - .capabilities - .capabilities() - .iter() - .map(|cap| cap.to_string()) - .collect(), - network: PeerNetworkInfo { - remote_address: peer.remote_addr, - local_address: peer.local_addr.unwrap_or_else(|| self.network.local_addr()), - inbound: peer.direction.is_incoming(), - trusted: peer.kind.is_trusted(), - static_node: peer.kind.is_static(), - }, - protocols: PeerProtocolInfo { - eth: Some(EthPeerInfo::Info(EthInfo { - version: peer.status.version as u64, - })), - snap: None, - other: Default::default(), - }, - }) - } + infos.push(PeerInfo { + id: keccak256(peer.remote_id.as_slice()).to_string(), + name: peer.client_version.to_string(), + enode: peer.enode, + enr: peer.enr, + caps: peer.capabilities.capabilities().iter().map(|cap| cap.to_string()).collect(), + network: PeerNetworkInfo { + remote_address: peer.remote_addr, + local_address: peer.local_addr.unwrap_or_else(|| self.network.local_addr()), + inbound: peer.direction.is_incoming(), + trusted: peer.kind.is_trusted(), + static_node: peer.kind.is_static(), + }, + protocols: PeerProtocolInfo { + eth: Some(EthPeerInfo::Info(EthInfo { version: peer.status.version as u64 })), + snap: None, + other: Default::default(), + }, + }) } Ok(infos) From 663690f348a35da6e2588b0909633364dc6cc9cf Mon Sep 17 00:00:00 2001 From: Ishika Choudhury <117741714+Rimeeeeee@users.noreply.github.com> Date: Thu, 23 Oct 2025 22:29:43 +0530 Subject: [PATCH 21/23] fixes --- Cargo.lock | 30 ++++++------- Cargo.toml | 4 +- .../engine/invalid-block-hooks/src/witness.rs | 21 +++++---- crates/engine/tree/benches/channel_perf.rs | 1 + crates/engine/tree/benches/state_root_task.rs | 1 + crates/engine/tree/src/tree/metrics.rs | 21 ++++++--- 
.../tree/src/tree/payload_processor/mod.rs | 1 + .../engine/tree/src/tree/payload_validator.rs | 17 ++++---- crates/engine/util/src/reorg.rs | 19 ++++---- crates/ethereum/evm/src/test_utils.rs | 13 +++--- crates/ethereum/evm/tests/execute.rs | 3 ++ crates/ethereum/payload/src/lib.rs | 16 +++---- crates/evm/evm/src/either.rs | 2 +- crates/evm/evm/src/execute.rs | 43 +++++++++++-------- crates/evm/evm/src/lib.rs | 13 +++--- .../execution-types/src/execution_outcome.rs | 18 ++++++-- crates/optimism/flashblocks/src/worker.rs | 10 ++++- crates/optimism/payload/src/builder.rs | 20 ++++++--- crates/primitives-traits/src/account.rs | 1 + crates/rpc/rpc-eth-api/src/helpers/call.rs | 15 +++---- .../rpc-eth-api/src/helpers/pending_block.rs | 10 ++--- crates/rpc/rpc-eth-types/src/error/mod.rs | 26 ++++++++++- crates/stateless/src/witness_db.rs | 1 + crates/trie/common/src/hashed_state.rs | 2 + .../custom-beacon-withdrawals/src/main.rs | 11 +++-- examples/custom-node/src/evm/executor.rs | 11 +++-- 26 files changed, 204 insertions(+), 126 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2b7c6cf347f..ab0851a79ab 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -262,7 +262,7 @@ dependencies = [ [[package]] name = "alloy-evm" version = "0.22.3" -source = "git+https://github.com/Rimeeeeee/evm?branch=new-approach1#4be042c9ef8f3af15f94f2eac6441525811d57b1" +source = "git+https://github.com/Rimeeeeee/evm?branch=new-approach2#32bd3da9eafab93a0f32fd855a3f27f1b0a0ca08" dependencies = [ "alloy-consensus", "alloy-eips", @@ -375,7 +375,7 @@ dependencies = [ [[package]] name = "alloy-op-evm" version = "0.22.3" -source = "git+https://github.com/Rimeeeeee/evm?branch=new-approach1#4be042c9ef8f3af15f94f2eac6441525811d57b1" +source = "git+https://github.com/Rimeeeeee/evm?branch=new-approach2#32bd3da9eafab93a0f32fd855a3f27f1b0a0ca08" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6111,7 +6111,7 @@ dependencies = [ [[package]] name = "op-revm" version = "11.2.0" -source = 
"git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#44e67cc41e2f737b0fa9c3a31b5c539ea24cfdf6" +source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#ea1e03e92e3772985230eb393a072f475c0de893" dependencies = [ "auto_impl", "revm", @@ -10815,7 +10815,7 @@ dependencies = [ [[package]] name = "revm" version = "30.2.0" -source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#44e67cc41e2f737b0fa9c3a31b5c539ea24cfdf6" +source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#ea1e03e92e3772985230eb393a072f475c0de893" dependencies = [ "revm-bytecode", "revm-context", @@ -10833,7 +10833,7 @@ dependencies = [ [[package]] name = "revm-bytecode" version = "7.0.2" -source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#44e67cc41e2f737b0fa9c3a31b5c539ea24cfdf6" +source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#ea1e03e92e3772985230eb393a072f475c0de893" dependencies = [ "bitvec", "phf 0.13.1", @@ -10844,7 +10844,7 @@ dependencies = [ [[package]] name = "revm-context" version = "10.1.2" -source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#44e67cc41e2f737b0fa9c3a31b5c539ea24cfdf6" +source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#ea1e03e92e3772985230eb393a072f475c0de893" dependencies = [ "bitvec", "cfg-if", @@ -10860,7 +10860,7 @@ dependencies = [ [[package]] name = "revm-context-interface" version = "11.1.2" -source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#44e67cc41e2f737b0fa9c3a31b5c539ea24cfdf6" +source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#ea1e03e92e3772985230eb393a072f475c0de893" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -10875,7 +10875,7 @@ dependencies = [ [[package]] name = "revm-database" version = "9.0.2" -source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#44e67cc41e2f737b0fa9c3a31b5c539ea24cfdf6" +source = 
"git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#ea1e03e92e3772985230eb393a072f475c0de893" dependencies = [ "alloy-eips", "revm-bytecode", @@ -10888,7 +10888,7 @@ dependencies = [ [[package]] name = "revm-database-interface" version = "8.0.3" -source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#44e67cc41e2f737b0fa9c3a31b5c539ea24cfdf6" +source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#ea1e03e92e3772985230eb393a072f475c0de893" dependencies = [ "auto_impl", "either", @@ -10900,7 +10900,7 @@ dependencies = [ [[package]] name = "revm-handler" version = "11.2.0" -source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#44e67cc41e2f737b0fa9c3a31b5c539ea24cfdf6" +source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#ea1e03e92e3772985230eb393a072f475c0de893" dependencies = [ "auto_impl", "derive-where", @@ -10918,7 +10918,7 @@ dependencies = [ [[package]] name = "revm-inspector" version = "11.2.0" -source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#44e67cc41e2f737b0fa9c3a31b5c539ea24cfdf6" +source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#ea1e03e92e3772985230eb393a072f475c0de893" dependencies = [ "auto_impl", "either", @@ -10968,7 +10968,7 @@ dependencies = [ [[package]] name = "revm-interpreter" version = "28.0.0" -source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#44e67cc41e2f737b0fa9c3a31b5c539ea24cfdf6" +source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#ea1e03e92e3772985230eb393a072f475c0de893" dependencies = [ "revm-bytecode", "revm-context-interface", @@ -10980,7 +10980,7 @@ dependencies = [ [[package]] name = "revm-precompile" version = "28.1.1" -source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#44e67cc41e2f737b0fa9c3a31b5c539ea24cfdf6" +source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#ea1e03e92e3772985230eb393a072f475c0de893" dependencies = [ "ark-bls12-381", "ark-bn254", @@ -11004,7 
+11004,7 @@ dependencies = [ [[package]] name = "revm-primitives" version = "21.0.1" -source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#44e67cc41e2f737b0fa9c3a31b5c539ea24cfdf6" +source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#ea1e03e92e3772985230eb393a072f475c0de893" dependencies = [ "alloy-primitives", "num_enum", @@ -11015,7 +11015,7 @@ dependencies = [ [[package]] name = "revm-state" version = "8.0.2" -source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#44e67cc41e2f737b0fa9c3a31b5c539ea24cfdf6" +source = "git+https://github.com/bluealloy/revm?branch=rakita%2Fbal#ea1e03e92e3772985230eb393a072f475c0de893" dependencies = [ "alloy-eip7928", "bitflags 2.10.0", diff --git a/Cargo.toml b/Cargo.toml index 2ead9e5a87a..4d79a7c08d0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -769,8 +769,8 @@ alloy-transport-ws = { git = "https://github.com/Soubhik-10/alloy", branch = "ba # # revm-inspectors = { git = "https://github.com/paradigmxyz/revm-inspectors", rev = "1207e33" } revm = { git = "https://github.com/bluealloy/revm", branch = "rakita/bal" } -alloy-evm = { git = "https://github.com/Rimeeeeee/evm", branch = "new-approach1" } -alloy-op-evm = { git = "https://github.com/Rimeeeeee/evm", branch = "new-approach1" } +alloy-evm = { git = "https://github.com/Rimeeeeee/evm", branch = "new-approach2" } +alloy-op-evm = { git = "https://github.com/Rimeeeeee/evm", branch = "new-approach2" } revm-bytecode = { git = "https://github.com/bluealloy/revm", branch = "rakita/bal" } revm-database = { git = "https://github.com/bluealloy/revm", branch = "rakita/bal" } revm-state = { git = "https://github.com/bluealloy/revm", branch = "rakita/bal" } diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs index 7c66c67373b..19a2691d76d 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -16,6 +16,7 @@ use 
reth_trie::{updates::TrieUpdates, HashedStorage}; use revm::state::AccountInfo; use revm_bytecode::Bytecode; use revm_database::{ + bal::BalDatabase, states::{reverts::AccountInfoRevert, StorageSlot}, AccountStatus, RevertToSlot, }; @@ -114,7 +115,7 @@ fn sort_bundle_state_for_comparison(bundle_state: &BundleState) -> BundleStateSo /// Extracts execution data including codes, preimages, and hashed state from database fn collect_execution_data( - mut db: State>>, + mut db: BalDatabase>>>, ) -> eyre::Result { let bundle_state = db.take_bundle(); let mut codes = BTreeMap::new(); @@ -128,7 +129,7 @@ fn collect_execution_data( }); // Collect preimages - for (address, account) in db.cache.accounts { + for (address, account) in db.db.cache.accounts { let hashed_address = keccak256(address); hashed_state .accounts @@ -448,12 +449,14 @@ mod tests { nonce: account.nonce, code_hash: account.bytecode_hash.unwrap_or_default(), code: None, + storage_id: None, }), original_info: (i == 0).then(|| AccountInfo { balance: account.balance.checked_div(U256::from(2)).unwrap_or(U256::ZERO), nonce: 0, code_hash: account.bytecode_hash.unwrap_or_default(), code: None, + storage_id: None, }), storage, status: AccountStatus::default(), @@ -529,12 +532,14 @@ mod tests { // Create a State with StateProviderTest let state_provider = StateProviderTest::default(); - let mut state = State::builder() - .with_database(StateProviderDatabase::new( - Box::new(state_provider) as Box - )) - .with_bundle_update() - .build(); + let mut state = BalDatabase::new( + State::builder() + .with_database(StateProviderDatabase::new( + Box::new(state_provider) as Box + )) + .with_bundle_update() + .build(), + ); // Insert contracts from the fixture into the state cache for (code_hash, bytecode) in &bundle_state.contracts { diff --git a/crates/engine/tree/benches/channel_perf.rs b/crates/engine/tree/benches/channel_perf.rs index 2409f442796..84adaf77090 100644 --- a/crates/engine/tree/benches/channel_perf.rs +++ 
b/crates/engine/tree/benches/channel_perf.rs @@ -26,6 +26,7 @@ fn create_bench_state(num_accounts: usize) -> EvmState { nonce: 10, code_hash: B256::from_slice(&rng.random::<[u8; 32]>()), code: Default::default(), + storage_id: None, }, storage, status: AccountStatus::empty(), diff --git a/crates/engine/tree/benches/state_root_task.rs b/crates/engine/tree/benches/state_root_task.rs index d0e32c1fccc..92af001d667 100644 --- a/crates/engine/tree/benches/state_root_task.rs +++ b/crates/engine/tree/benches/state_root_task.rs @@ -72,6 +72,7 @@ fn create_bench_state_updates(params: &BenchParams) -> Vec { nonce: rng.random::(), code_hash: KECCAK_EMPTY, code: Some(Default::default()), + storage_id: None, }, storage: (0..rng.random_range(0..=params.storage_slots_per_account)) .map(|_| { diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index b70b3b5f72a..3a4f85fcd2d 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -13,6 +13,7 @@ use reth_metrics::{ Metrics, }; use reth_primitives_traits::SignedTransaction; +use reth_revm::db::bal::BalDatabase; use reth_trie::updates::TrieUpdates; use revm::database::{states::bundle_state::BundleRetention, State}; use std::time::Instant; @@ -65,7 +66,10 @@ impl EngineApiMetrics { ) -> Result, BlockExecutionError> where DB: alloy_evm::Database, - E: BlockExecutor>>, Transaction: SignedTransaction>, + E: BlockExecutor< + Evm: Evm>>>, + Transaction: SignedTransaction, + >, { // clone here is cheap, all the metrics are Option>. 
additionally // they are globally registered so that the data recorded in the hook will @@ -246,7 +250,7 @@ mod tests { } // Mock Evm type for testing - type MockEvm = EthEvm, NoOpInspector>; + type MockEvm = EthEvm>, NoOpInspector>; impl BlockExecutor for MockExecutor { type Transaction = TransactionSigned; @@ -297,11 +301,13 @@ mod tests { } // Create a mock EVM - let db = State::builder() - .with_database(EmptyDB::default()) - .with_bundle_update() - .without_state_clear() - .build(); + let db = BalDatabase::new( + State::builder() + .with_database(EmptyDB::default()) + .with_bundle_update() + .without_state_clear() + .build(), + ); let evm = EthEvm::new( Context::mainnet().with_db(db).build_mainnet_with_inspector(NoOpInspector {}), false, @@ -408,6 +414,7 @@ mod tests { nonce: 10, code_hash: B256::random(), code: Default::default(), + storage_id: None, }, storage, status: AccountStatus::default(), diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index 8be64d00e7b..363358bf3d4 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -801,6 +801,7 @@ mod tests { nonce: rng.random::(), code_hash: KECCAK_EMPTY, code: Some(Default::default()), + storage_id: None, }, storage, status: AccountStatus::Touched, diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index 37fd42e8a4b..084688a90bf 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -37,7 +37,7 @@ use reth_provider::{ ExecutionOutcome, HashedPostStateProvider, ProviderError, StateProvider, StateProviderFactory, StateReader, StateRootProvider, TrieReader, }; -use reth_revm::db::State; +use reth_revm::db::{bal::BalDatabase, State}; use reth_trie::{ updates::{TrieUpdates, TrieUpdatesSorted}, HashedPostState, KeccakKeyHasher, TrieInput, @@ -625,14 
+625,13 @@ where Evm: ConfigureEngineEvm, { debug!(target: "engine::tree::payload_validator", "Executing block"); - let mut db = State::builder() - .with_database(StateProviderDatabase::new(&state_provider)) - .with_bundle_update() - .with_bal_builder()//TODO - .without_state_clear() - .build(); - db.bal_index = 0; - db.bal_builder = Some(revm::state::bal::Bal::new()); + let mut db = BalDatabase::new( + State::builder() + .with_database(StateProviderDatabase::new(&state_provider)) + .with_bundle_update() + .without_state_clear() + .build(), + ); let evm = self.evm_config.evm_with_env(&mut db, env.evm_env.clone()); let ctx = diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index d1512bbeb80..263a08b4f86 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -18,7 +18,10 @@ use reth_payload_primitives::{BuiltPayload, EngineApiMessageVersion, PayloadType use reth_primitives_traits::{ block::Block as _, BlockBody as _, BlockTy, HeaderTy, SealedBlock, SignedTransaction, }; -use reth_revm::{database::StateProviderDatabase, db::State}; +use reth_revm::{ + database::StateProviderDatabase, + db::{bal::BalDatabase, State}, +}; use reth_storage_api::{errors::ProviderError, BlockReader, StateProviderFactory}; use std::{ collections::VecDeque, @@ -280,14 +283,12 @@ where // Configure state let state_provider = provider.state_by_block_hash(reorg_target.header().parent_hash())?; - let mut state = State::builder() - .with_database_ref(StateProviderDatabase::new(&state_provider)) - .with_bundle_update() - .with_bal_builder() - .build(); - - state.bal_index = 0; - state.bal_builder = Some(revm::state::bal::Bal::new()); + let mut state = BalDatabase::new( + State::builder() + .with_database_ref(StateProviderDatabase::new(&state_provider)) + .with_bundle_update() + .build(), + ); let ctx = evm_config.context_for_block(&reorg_target).map_err(RethError::other)?; let evm = evm_config.evm_for_block(&mut state, 
&reorg_target).map_err(RethError::other)?; diff --git a/crates/ethereum/evm/src/test_utils.rs b/crates/ethereum/evm/src/test_utils.rs index 72edba8e260..9c4863b5dd3 100644 --- a/crates/ethereum/evm/src/test_utils.rs +++ b/crates/ethereum/evm/src/test_utils.rs @@ -19,7 +19,7 @@ use reth_execution_types::{BlockExecutionResult, ExecutionOutcome}; use reth_primitives_traits::{BlockTy, SealedBlock, SealedHeader}; use revm::{ context::result::{ExecutionResult, Output, ResultAndState, SuccessReason}, - database::State, + database::{bal::BalDatabase, State}, Inspector, }; @@ -58,12 +58,13 @@ impl BlockExecutorFactory for MockEvmConfig { fn create_executor<'a, DB, I>( &'a self, - evm: EthEvm<&'a mut State, I, PrecompilesMap>, + evm: EthEvm<&'a mut BalDatabase>, I, PrecompilesMap>, _ctx: Self::ExecutionCtx<'a>, ) -> impl BlockExecutorFor<'a, Self, DB, I> where DB: Database + 'a, - I: Inspector<::Context<&'a mut State>> + 'a, + I: Inspector<::Context<&'a mut BalDatabase>>> + + 'a, { MockExecutor { result: self.exec_results.lock().pop().unwrap(), evm, hook: None } } @@ -73,15 +74,15 @@ impl BlockExecutorFactory for MockEvmConfig { #[derive(derive_more::Debug)] pub struct MockExecutor<'a, DB: Database, I> { result: ExecutionOutcome, - evm: EthEvm<&'a mut State, I, PrecompilesMap>, + evm: EthEvm<&'a mut BalDatabase>, I, PrecompilesMap>, #[debug(skip)] hook: Option>, } -impl<'a, DB: Database, I: Inspector>>> BlockExecutor +impl<'a, DB: Database, I: Inspector>>>> BlockExecutor for MockExecutor<'a, DB, I> { - type Evm = EthEvm<&'a mut State, I, PrecompilesMap>; + type Evm = EthEvm<&'a mut BalDatabase>, I, PrecompilesMap>; type Transaction = TransactionSigned; type Receipt = Receipt; diff --git a/crates/ethereum/evm/tests/execute.rs b/crates/ethereum/evm/tests/execute.rs index a266722c18f..db97257d260 100644 --- a/crates/ethereum/evm/tests/execute.rs +++ b/crates/ethereum/evm/tests/execute.rs @@ -38,6 +38,7 @@ fn create_database_with_beacon_root_contract() -> CacheDB { code_hash: 
keccak256(BEACON_ROOTS_CODE.clone()), nonce: 1, code: Some(Bytecode::new_raw(BEACON_ROOTS_CODE.clone())), + storage_id: None, }; db.insert_account_info(BEACON_ROOTS_ADDRESS, beacon_root_contract_account); @@ -53,6 +54,7 @@ fn create_database_with_withdrawal_requests_contract() -> CacheDB { balance: U256::ZERO, code_hash: keccak256(WITHDRAWAL_REQUEST_PREDEPLOY_CODE.clone()), code: Some(Bytecode::new_raw(WITHDRAWAL_REQUEST_PREDEPLOY_CODE.clone())), + storage_id: None, }; db.insert_account_info( @@ -359,6 +361,7 @@ fn create_database_with_block_hashes(latest_block: u64) -> CacheDB { code_hash: keccak256(HISTORY_STORAGE_CODE.clone()), code: Some(Bytecode::new_raw(HISTORY_STORAGE_CODE.clone())), nonce: 1, + storage_id: None, }; db.insert_account_info(HISTORY_STORAGE_ADDRESS, blockhashes_contract_account); diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 5a0f394608d..9e9063002c0 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -29,7 +29,10 @@ use reth_payload_builder::{BlobSidecars, EthBuiltPayload, EthPayloadBuilderAttri use reth_payload_builder_primitives::PayloadBuilderError; use reth_payload_primitives::PayloadBuilderAttributes; use reth_primitives_traits::transaction::error::InvalidTransactionError; -use reth_revm::{database::StateProviderDatabase, db::State}; +use reth_revm::{ + database::StateProviderDatabase, + db::{bal::BalDatabase, State}, +}; use reth_storage_api::StateProviderFactory; use reth_transaction_pool::{ error::{Eip4844PoolTransactionError, InvalidPoolTransactionError}, @@ -154,13 +157,10 @@ where let state_provider = client.state_by_block_hash(parent_header.hash())?; let state = StateProviderDatabase::new(&state_provider); - let mut db = State::builder() - .with_database(cached_reads.as_db_mut(state)) - .with_bundle_update() - .with_bal_builder() - .build(); - db.bal_index = 0; - db.bal_builder = Some(revm::state::bal::Bal::new()); + let mut db = BalDatabase::new( + 
State::builder().with_database(cached_reads.as_db_mut(state)).with_bundle_update().build(), + ); + let mut builder = evm_config .builder_for_next_block( &mut db, diff --git a/crates/evm/evm/src/either.rs b/crates/evm/evm/src/either.rs index 904ce7ebbd6..aa28484a6f9 100644 --- a/crates/evm/evm/src/either.rs +++ b/crates/evm/evm/src/either.rs @@ -66,7 +66,7 @@ where } } - fn into_state(self) -> revm::database::State { + fn into_state(self) -> revm::database::bal::BalDatabase> { match self { Self::Left(a) => a.into_state(), Self::Right(b) => b.into_state(), diff --git a/crates/evm/evm/src/execute.rs b/crates/evm/evm/src/execute.rs index 0ce5890f133..2685f961ce6 100644 --- a/crates/evm/evm/src/execute.rs +++ b/crates/evm/evm/src/execute.rs @@ -23,7 +23,7 @@ pub use reth_storage_errors::provider::ProviderError; use reth_trie_common::{updates::TrieUpdates, HashedPostState}; use revm::{ context::result::ExecutionResult, - database::{states::bundle_state::BundleRetention, BundleState, State}, + database::{bal::BalDatabase, states::bundle_state::BundleRetention, BundleState, State}, }; /// A type that knows how to execute a block. It is assumed to operate on a @@ -140,7 +140,7 @@ pub trait Executor: Sized { } /// Consumes the executor and returns the [`State`] containing all state changes. - fn into_state(self) -> State; + fn into_state(self) -> BalDatabase>; /// The size hint of the batch's tracked state size. /// @@ -470,7 +470,7 @@ where Spec = ::Spec, HaltReason = ::HaltReason, BlockEnv = ::BlockEnv, - DB = &'a mut State, + DB = &'a mut BalDatabase>, >, Transaction = N::SignedTx, Receipt = N::Receipt, @@ -558,20 +558,16 @@ pub struct BasicBlockExecutor { /// Block execution strategy. pub(crate) strategy_factory: F, /// Database. - pub(crate) db: State, + pub(crate) db: BalDatabase>, } impl BasicBlockExecutor { /// Creates a new `BasicBlockExecutor` with the given strategy. 
pub fn new(strategy_factory: F, db: DB) -> Self { - let mut db = State::builder() - .with_database(db) - .with_bundle_update() - .with_bal_builder() - .without_state_clear() - .build(); - db.bal_index = 0; - db.bal_builder = Some(revm::state::bal::Bal::new()); + let db = BalDatabase::new( + State::builder().with_database(db).with_bundle_update().without_state_clear().build(), + ); + Self { strategy_factory, db } } } @@ -620,7 +616,7 @@ where Ok(result) } - fn into_state(self) -> State { + fn into_state(self) -> BalDatabase> { self.db } @@ -717,7 +713,7 @@ mod tests { Err(BlockExecutionError::msg("execution unavailable for tests")) } - fn into_state(self) -> State { + fn into_state(self) -> BalDatabase> { unreachable!() } @@ -747,6 +743,7 @@ mod tests { nonce, code_hash: KECCAK_EMPTY, code: None, + storage_id: None, }; state.insert_account(addr, account_info); state @@ -783,8 +780,13 @@ mod tests { let mut state = setup_state_with_account(addr1, 100, 1); - let account2 = - AccountInfo { balance: U256::from(200), nonce: 1, code_hash: KECCAK_EMPTY, code: None }; + let account2 = AccountInfo { + balance: U256::from(200), + nonce: 1, + code_hash: KECCAK_EMPTY, + code: None, + storage_id: None, + }; state.insert_account(addr2, account2); let mut increments = HashMap::default(); @@ -805,8 +807,13 @@ mod tests { let mut state = setup_state_with_account(addr1, 100, 1); - let account2 = - AccountInfo { balance: U256::from(200), nonce: 1, code_hash: KECCAK_EMPTY, code: None }; + let account2 = AccountInfo { + balance: U256::from(200), + nonce: 1, + code_hash: KECCAK_EMPTY, + code: None, + storage_id: None, + }; state.insert_account(addr2, account2); let mut increments = HashMap::default(); diff --git a/crates/evm/evm/src/lib.rs b/crates/evm/evm/src/lib.rs index c70a885f2b2..3f99eb58e92 100644 --- a/crates/evm/evm/src/lib.rs +++ b/crates/evm/evm/src/lib.rs @@ -36,6 +36,7 @@ use reth_execution_errors::BlockExecutionError; use reth_primitives_traits::{ BlockTy, HeaderTy, 
NodePrimitives, ReceiptTy, SealedBlock, SealedHeader, TxTy, }; +use revm::database::bal::BalDatabase; pub mod either; /// EVM environment configuration. @@ -312,12 +313,12 @@ pub trait ConfigureEvm: Clone + Debug + Send + Sync + Unpin { /// Creates a strategy with given EVM and execution context. fn create_executor<'a, DB, I>( &'a self, - evm: EvmFor, I>, + evm: EvmFor>, I>, ctx: ::ExecutionCtx<'a>, ) -> impl BlockExecutorFor<'a, Self::BlockExecutorFactory, DB, I> where DB: Database, - I: InspectorFor> + 'a, + I: InspectorFor>> + 'a, { self.block_executor_factory().create_executor(evm, ctx) } @@ -325,7 +326,7 @@ pub trait ConfigureEvm: Clone + Debug + Send + Sync + Unpin { /// Creates a strategy for execution of a given block. fn executor_for_block<'a, DB: Database>( &'a self, - db: &'a mut State, + db: &'a mut BalDatabase>, block: &'a SealedBlock<::Block>, ) -> Result, Self::Error> { let evm = self.evm_for_block(db, block.header())?; @@ -350,7 +351,7 @@ pub trait ConfigureEvm: Clone + Debug + Send + Sync + Unpin { /// ``` fn create_block_builder<'a, DB, I>( &'a self, - evm: EvmFor, I>, + evm: EvmFor>, I>, parent: &'a SealedHeader>, ctx: ::ExecutionCtx<'a>, ) -> impl BlockBuilder< @@ -359,7 +360,7 @@ pub trait ConfigureEvm: Clone + Debug + Send + Sync + Unpin { > where DB: Database, - I: InspectorFor> + 'a, + I: InspectorFor>> + 'a, { BasicBlockBuilder { executor: self.create_executor(evm, ctx.clone()), @@ -401,7 +402,7 @@ pub trait ConfigureEvm: Clone + Debug + Send + Sync + Unpin { /// ``` fn builder_for_next_block<'a, DB: Database>( &'a self, - db: &'a mut State, + db: &'a mut BalDatabase>, parent: &'a SealedHeader<::BlockHeader>, attributes: Self::NextBlockEnvCtx, ) -> Result< diff --git a/crates/evm/execution-types/src/execution_outcome.rs b/crates/evm/execution-types/src/execution_outcome.rs index 49c35247297..1ee11f67cc6 100644 --- a/crates/evm/execution-types/src/execution_outcome.rs +++ b/crates/evm/execution-types/src/execution_outcome.rs @@ -924,10 
+924,20 @@ mod tests { let address3 = Address::random(); // Set up account info with some changes - let account_info1 = - AccountInfo { nonce: 1, balance: U256::from(100), code_hash: B256::ZERO, code: None }; - let account_info2 = - AccountInfo { nonce: 2, balance: U256::from(200), code_hash: B256::ZERO, code: None }; + let account_info1 = AccountInfo { + nonce: 1, + balance: U256::from(100), + code_hash: B256::ZERO, + code: None, + storage_id: None, + }; + let account_info2 = AccountInfo { + nonce: 2, + balance: U256::from(200), + code_hash: B256::ZERO, + code: None, + storage_id: None, + }; // Set up the bundle state with these accounts let mut bundle_state = BundleState::default(); diff --git a/crates/optimism/flashblocks/src/worker.rs b/crates/optimism/flashblocks/src/worker.rs index 8cf7777f6a6..39ca6fe5d7b 100644 --- a/crates/optimism/flashblocks/src/worker.rs +++ b/crates/optimism/flashblocks/src/worker.rs @@ -11,7 +11,11 @@ use reth_execution_types::ExecutionOutcome; use reth_primitives_traits::{ AlloyBlockHeader, BlockTy, HeaderTy, NodePrimitives, ReceiptTy, Recovered, }; -use reth_revm::{cached::CachedReads, database::StateProviderDatabase, db::State}; +use reth_revm::{ + cached::CachedReads, + database::StateProviderDatabase, + db::{bal::BalDatabase, State}, +}; use reth_rpc_eth_types::{EthApiError, PendingBlock}; use reth_storage_api::{noop::NoopProvider, BlockReaderIdExt, StateProviderFactory}; use std::{ @@ -90,7 +94,9 @@ where .map(|(_, state)| state) .unwrap_or_default(); let cached_db = request_cache.as_db_mut(StateProviderDatabase::new(&state_provider)); - let mut state = State::builder().with_database(cached_db).with_bundle_update().build(); + let mut state = BalDatabase::new( + State::builder().with_database(cached_db).with_bundle_update().build(), + ); let mut builder = self .evm_config diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 67b8faf5608..ad8152123b7 100644 --- 
a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -39,7 +39,10 @@ use reth_revm::{ }; use reth_storage_api::{errors::ProviderError, StateProvider, StateProviderFactory}; use reth_transaction_pool::{BestTransactionsAttributes, PoolTransaction, TransactionPool}; -use revm::context::{Block, BlockEnv}; +use revm::{ + context::{Block, BlockEnv}, + database::bal::BalDatabase, +}; use std::{marker::PhantomData, sync::Arc}; use tracing::{debug, trace, warn}; @@ -338,7 +341,8 @@ impl OpBuilder<'_, Txs> { let Self { best } = self; debug!(target: "payload_builder", id=%ctx.payload_id(), parent_header = ?ctx.parent().hash(), parent_number = ctx.parent().number(), "building new payload"); - let mut db = State::builder().with_database(db).with_bundle_update().build(); + let mut db = + BalDatabase::new(State::builder().with_database(db).with_bundle_update().build()); let mut builder = ctx.block_builder(&mut db)?; @@ -417,10 +421,12 @@ impl OpBuilder<'_, Txs> { Txs: PayloadTransactions>, Attrs: OpAttributes, { - let mut db = State::builder() - .with_database(StateProviderDatabase::new(&state_provider)) - .with_bundle_update() - .build(); + let mut db = BalDatabase::new( + State::builder() + .with_database(StateProviderDatabase::new(&state_provider)) + .with_bundle_update() + .build(), + ); let mut builder = ctx.block_builder(&mut db)?; builder.apply_pre_execution_changes()?; @@ -585,7 +591,7 @@ where /// Prepares a [`BlockBuilder`] for the next block. 
pub fn block_builder<'a, DB: Database>( &'a self, - db: &'a mut State, + db: &'a mut BalDatabase>, ) -> Result + 'a, PayloadBuilderError> { self.evm_config .builder_for_next_block( diff --git a/crates/primitives-traits/src/account.rs b/crates/primitives-traits/src/account.rs index 8c4a496dabd..5247b8f22fe 100644 --- a/crates/primitives-traits/src/account.rs +++ b/crates/primitives-traits/src/account.rs @@ -238,6 +238,7 @@ impl From for AccountInfo { nonce: reth_acc.nonce, code_hash: reth_acc.bytecode_hash.unwrap_or(KECCAK_EMPTY), code: None, + storage_id: None, } } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 94ef5560cf9..8da63dd57be 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -27,7 +27,7 @@ use reth_node_api::BlockBody; use reth_primitives_traits::Recovered; use reth_revm::{ database::StateProviderDatabase, - db::{CacheDB, State}, + db::{bal::BalDatabase, CacheDB, State}, }; use reth_rpc_convert::{RpcConvert, RpcTxReq}; use reth_rpc_eth_types::{ @@ -98,12 +98,9 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA let this = self.clone(); self.spawn_with_state_at_block(block, move |state| { //todo? 
- let mut db = State::builder() - .with_database(StateProviderDatabase::new(state)) - .with_bal_builder() - .build(); - db.bal_index = 0; - db.bal_builder = Some(revm::state::bal::Bal::new()); + let mut db = BalDatabase::new( + State::builder().with_database(StateProviderDatabase::new(state)).build(), + ); let mut blocks: Vec>> = Vec::with_capacity(block_state_calls.len()); for block in block_state_calls { @@ -136,12 +133,12 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA } apply_block_overrides( block_overrides, - &mut db, + &mut db.db, evm_env.block_env.inner_mut(), ); } if let Some(state_overrides) = state_overrides { - apply_state_overrides(state_overrides, &mut db) + apply_state_overrides(state_overrides, &mut db.db) .map_err(Self::Error::from_eth_err)?; } diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index c0dbd37026b..97400657e0f 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -16,7 +16,10 @@ use reth_evm::{ ConfigureEvm, Evm, NextBlockEnvAttributes, }; use reth_primitives_traits::{transaction::error::InvalidTransactionError, HeaderTy, SealedHeader}; -use reth_revm::{database::StateProviderDatabase, db::State}; +use reth_revm::{ + database::StateProviderDatabase, + db::{bal::BalDatabase, State}, +}; use reth_rpc_convert::RpcConvert; use reth_rpc_eth_types::{ block::BlockAndReceipts, builder::config::PendingBlockKind, EthApiError, PendingBlock, @@ -237,10 +240,7 @@ pub trait LoadPendingBlock: .map_err(Self::Error::from_eth_err)?; let state = StateProviderDatabase::new(&state_provider); let mut db = - State::builder().with_database(state).with_bundle_update().with_bal_builder().build(); - - db.bal_index = 0; - db.bal_builder = Some(revm::state::bal::Bal::new()); + BalDatabase::new(State::builder().with_database(state).with_bundle_update().build()); let mut builder = self 
.evm_config() diff --git a/crates/rpc/rpc-eth-types/src/error/mod.rs b/crates/rpc/rpc-eth-types/src/error/mod.rs index ef65e4ccc2b..26509a07aff 100644 --- a/crates/rpc/rpc-eth-types/src/error/mod.rs +++ b/crates/rpc/rpc-eth-types/src/error/mod.rs @@ -12,6 +12,7 @@ pub use api::{AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError}; use core::time::Duration; use reth_errors::{BlockExecutionError, BlockValidationError, RethError}; use reth_primitives_traits::transaction::{error::InvalidTransactionError, signed::RecoveryError}; +use reth_revm::db::bal::BalDatabaseError; use reth_rpc_convert::{CallFeesError, EthTxEnvError, TransactionConversionError}; use reth_rpc_server_types::result::{ block_id_to_str, internal_rpc_err, invalid_params_rpc_err, rpc_err, rpc_error_with_code, @@ -20,8 +21,11 @@ use reth_transaction_pool::error::{ Eip4844PoolTransactionError, Eip7702PoolTransactionError, InvalidPoolTransactionError, PoolError, PoolErrorKind, PoolTransactionError, }; -use revm::context_interface::result::{ - EVMError, ExecutionResult, HaltReason, InvalidHeader, InvalidTransaction, OutOfGasError, +use revm::{ + context_interface::result::{ + EVMError, ExecutionResult, HaltReason, InvalidHeader, InvalidTransaction, OutOfGasError, + }, + state::bal::BalError, }; use revm_inspectors::tracing::MuxError; use std::convert::Infallible; @@ -1084,6 +1088,24 @@ pub fn ensure_success + FromEthApiError>( } } +impl From> for EthApiError +where + E: Into, +{ + fn from(value: BalDatabaseError) -> Self { + match value { + BalDatabaseError::Bal(err) => err.into(), + BalDatabaseError::Database(err) => err.into(), + } + } +} + +impl From for EthApiError { + fn from(err: BalError) -> Self { + EthApiError::EvmCustom(format!("bal error: {:?}", err)) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/stateless/src/witness_db.rs b/crates/stateless/src/witness_db.rs index 4a99c286ad3..40acf2289dc 100644 --- a/crates/stateless/src/witness_db.rs +++ 
b/crates/stateless/src/witness_db.rs @@ -82,6 +82,7 @@ where nonce: account.nonce, code_hash: account.code_hash, code: None, + storage_id: None, }) }) } diff --git a/crates/trie/common/src/hashed_state.rs b/crates/trie/common/src/hashed_state.rs index 8fb994daddd..15254d6ee50 100644 --- a/crates/trie/common/src/hashed_state.rs +++ b/crates/trie/common/src/hashed_state.rs @@ -788,6 +788,7 @@ mod tests { nonce: 42, code_hash: B256::random(), code: Some(Bytecode::new_raw(Bytes::from(vec![1, 2]))), + storage_id: None, }; let mut storage = StorageWithOriginalValues::default(); @@ -832,6 +833,7 @@ mod tests { nonce: 1, code_hash: B256::random(), code: None, + storage_id: None, }; // Create hashed accounts with addresses. diff --git a/examples/custom-beacon-withdrawals/src/main.rs b/examples/custom-beacon-withdrawals/src/main.rs index 1d93226dd6a..00b3b44bf5f 100644 --- a/examples/custom-beacon-withdrawals/src/main.rs +++ b/examples/custom-beacon-withdrawals/src/main.rs @@ -8,7 +8,10 @@ use alloy_evm::{ block::{BlockExecutorFactory, BlockExecutorFor, ExecutableTx}, eth::{EthBlockExecutionCtx, EthBlockExecutor}, precompiles::PrecompilesMap, - revm::context::{result::ResultAndState, Block as _}, + revm::{ + context::{result::ResultAndState, Block as _}, + database::bal::BalDatabase, + }, EthEvm, EthEvmFactory, }; use alloy_sol_macro::sol; @@ -102,12 +105,12 @@ impl BlockExecutorFactory for CustomEvmConfig { fn create_executor<'a, DB, I>( &'a self, - evm: EthEvm<&'a mut State, I, PrecompilesMap>, + evm: EthEvm<&'a mut BalDatabase>, I, PrecompilesMap>, ctx: EthBlockExecutionCtx<'a>, ) -> impl BlockExecutorFor<'a, Self, DB, I> where DB: Database + 'a, - I: InspectorFor> + 'a, + I: InspectorFor>> + 'a, { CustomBlockExecutor { inner: EthBlockExecutor::new( @@ -191,7 +194,7 @@ pub struct CustomBlockExecutor<'a, Evm> { impl<'db, DB, E> BlockExecutor for CustomBlockExecutor<'_, E> where DB: Database + 'db, - E: Evm, Tx = TxEnv>, + E: Evm>, Tx = TxEnv>, { type Transaction = 
TransactionSigned; type Receipt = Receipt; diff --git a/examples/custom-node/src/evm/executor.rs b/examples/custom-node/src/evm/executor.rs index 5288e1d67a5..6f2c0fc7773 100644 --- a/examples/custom-node/src/evm/executor.rs +++ b/examples/custom-node/src/evm/executor.rs @@ -17,7 +17,10 @@ use alloy_evm::{ use alloy_op_evm::{OpBlockExecutionCtx, OpBlockExecutor}; use reth_ethereum::evm::primitives::InspectorFor; use reth_op::{chainspec::OpChainSpec, node::OpRethReceiptBuilder, OpReceipt}; -use revm::{context::result::ResultAndState, database::State}; +use revm::{ + context::result::ResultAndState, + database::{bal::BalDatabase, State}, +}; use std::sync::Arc; pub struct CustomBlockExecutor { @@ -27,7 +30,7 @@ pub struct CustomBlockExecutor { impl<'db, DB, E> BlockExecutor for CustomBlockExecutor where DB: Database + 'db, - E: Evm, Tx = CustomTxEnv>, + E: Evm>, Tx = CustomTxEnv>, { type Transaction = CustomTransaction; type Receipt = OpReceipt; @@ -91,12 +94,12 @@ impl BlockExecutorFactory for CustomEvmConfig { fn create_executor<'a, DB, I>( &'a self, - evm: CustomEvm<&'a mut State, I, PrecompilesMap>, + evm: CustomEvm<&'a mut BalDatabase>, I, PrecompilesMap>, ctx: CustomBlockExecutionCtx, ) -> impl BlockExecutorFor<'a, Self, DB, I> where DB: Database + 'a, - I: InspectorFor> + 'a, + I: InspectorFor>> + 'a, { CustomBlockExecutor { inner: OpBlockExecutor::new( From f90d778d2050c01f2c0e8d8bb4661e0a04c351fc Mon Sep 17 00:00:00 2001 From: Ishika Choudhury <117741714+Rimeeeeee@users.noreply.github.com> Date: Fri, 24 Oct 2025 11:10:23 +0530 Subject: [PATCH 22/23] Revert "Merge branch 'main' into new-approach1" This reverts commit 7ee4801426fccf8dd418fef9eca9af5e801acdf6, reversing changes made to 663690f348a35da6e2588b0909633364dc6cc9cf. 
--- Cargo.lock | 31 ---- Cargo.toml | 7 +- crates/chainspec/src/spec.rs | 143 +++------------ crates/cli/commands/src/p2p/mod.rs | 1 - crates/consensus/consensus/src/noop.rs | 28 --- crates/e2e-test-utils/src/lib.rs | 16 +- crates/engine/tree/src/tree/cached_state.rs | 143 ++++++--------- .../configured_sparse_trie.rs | 14 -- .../tree/src/tree/payload_processor/mod.rs | 35 +--- .../src/tree/payload_processor/multiproof.rs | 142 +++++++-------- .../src/tree/payload_processor/prewarm.rs | 16 +- crates/ethereum/cli/Cargo.toml | 2 - crates/ethereum/cli/src/app.rs | 51 +----- crates/node/core/Cargo.toml | 3 +- crates/node/core/src/args/pruning.rs | 33 ++-- crates/node/core/src/args/trace.rs | 58 +++--- crates/node/events/src/cl.rs | 10 +- crates/node/events/src/node.rs | 11 ++ crates/optimism/chainspec/src/basefee.rs | 169 ++---------------- crates/optimism/cli/Cargo.toml | 3 - crates/optimism/cli/src/app.rs | 50 +----- crates/optimism/evm/src/build.rs | 8 +- crates/optimism/evm/src/error.rs | 3 - crates/optimism/evm/src/l1.rs | 136 ++------------ crates/optimism/payload/Cargo.toml | 1 - crates/optimism/payload/src/builder.rs | 48 +---- crates/optimism/rpc/src/eth/receipt.rs | 20 +-- crates/optimism/rpc/src/eth/transaction.rs | 2 +- crates/optimism/txpool/src/validator.rs | 4 +- crates/prune/db/Cargo.toml | 15 -- crates/prune/db/src/lib.rs | 1 - crates/rpc/rpc-convert/src/transaction.rs | 1 + .../rpc-eth-api/src/helpers/transaction.rs | 2 +- crates/rpc/rpc/src/admin.rs | 48 ++--- .../static-file/src/segments/headers.rs | 54 ++++++ .../static-file/src/segments/mod.rs | 6 + .../static-file/src/segments/transactions.rs | 60 +++++++ .../static-file/src/static_file_producer.rs | 95 ++++++++-- crates/static-file/types/src/lib.rs | 94 ++++++++-- crates/storage/errors/src/lib.rs | 3 + crates/storage/errors/src/provider.rs | 5 +- crates/storage/errors/src/writer.rs | 24 +++ .../src/providers/static_file/manager.rs | 2 + crates/tracing-otlp/Cargo.toml | 3 +- 
crates/tracing-otlp/src/lib.rs | 71 +++----- crates/tracing/src/layers.rs | 17 +- crates/trie/parallel/src/proof_task.rs | 86 +-------- crates/trie/sparse-parallel/src/lower.rs | 22 --- crates/trie/sparse-parallel/src/trie.rs | 55 ------ crates/trie/sparse/src/state.rs | 51 ------ crates/trie/sparse/src/traits.rs | 8 - crates/trie/sparse/src/trie.rs | 32 ---- docs/vocs/docs/pages/cli/reth.mdx | 18 +- docs/vocs/docs/pages/cli/reth/config.mdx | 18 +- docs/vocs/docs/pages/cli/reth/db.mdx | 18 +- docs/vocs/docs/pages/cli/reth/db/checksum.mdx | 18 +- docs/vocs/docs/pages/cli/reth/db/clear.mdx | 18 +- .../docs/pages/cli/reth/db/clear/mdbx.mdx | 18 +- .../pages/cli/reth/db/clear/static-file.mdx | 18 +- docs/vocs/docs/pages/cli/reth/db/diff.mdx | 18 +- docs/vocs/docs/pages/cli/reth/db/drop.mdx | 18 +- docs/vocs/docs/pages/cli/reth/db/get.mdx | 18 +- docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx | 18 +- .../pages/cli/reth/db/get/static-file.mdx | 18 +- docs/vocs/docs/pages/cli/reth/db/list.mdx | 18 +- docs/vocs/docs/pages/cli/reth/db/path.mdx | 18 +- .../docs/pages/cli/reth/db/repair-trie.mdx | 18 +- docs/vocs/docs/pages/cli/reth/db/stats.mdx | 18 +- docs/vocs/docs/pages/cli/reth/db/version.mdx | 18 +- docs/vocs/docs/pages/cli/reth/download.mdx | 18 +- .../vocs/docs/pages/cli/reth/dump-genesis.mdx | 18 +- docs/vocs/docs/pages/cli/reth/export-era.mdx | 18 +- docs/vocs/docs/pages/cli/reth/import-era.mdx | 18 +- docs/vocs/docs/pages/cli/reth/import.mdx | 18 +- docs/vocs/docs/pages/cli/reth/init-state.mdx | 18 +- docs/vocs/docs/pages/cli/reth/init.mdx | 18 +- docs/vocs/docs/pages/cli/reth/node.mdx | 44 ++--- docs/vocs/docs/pages/cli/reth/p2p.mdx | 18 +- docs/vocs/docs/pages/cli/reth/p2p/body.mdx | 18 +- .../vocs/docs/pages/cli/reth/p2p/bootnode.mdx | 18 +- docs/vocs/docs/pages/cli/reth/p2p/header.mdx | 18 +- docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx | 18 +- .../docs/pages/cli/reth/p2p/rlpx/ping.mdx | 18 +- docs/vocs/docs/pages/cli/reth/prune.mdx | 18 +- 
docs/vocs/docs/pages/cli/reth/re-execute.mdx | 18 +- docs/vocs/docs/pages/cli/reth/stage.mdx | 18 +- docs/vocs/docs/pages/cli/reth/stage/drop.mdx | 18 +- docs/vocs/docs/pages/cli/reth/stage/dump.mdx | 18 +- .../cli/reth/stage/dump/account-hashing.mdx | 18 +- .../pages/cli/reth/stage/dump/execution.mdx | 18 +- .../docs/pages/cli/reth/stage/dump/merkle.mdx | 18 +- .../cli/reth/stage/dump/storage-hashing.mdx | 18 +- docs/vocs/docs/pages/cli/reth/stage/run.mdx | 18 +- .../vocs/docs/pages/cli/reth/stage/unwind.mdx | 18 +- .../cli/reth/stage/unwind/num-blocks.mdx | 18 +- .../pages/cli/reth/stage/unwind/to-block.mdx | 18 +- examples/custom-node/src/pool.rs | 2 +- 97 files changed, 779 insertions(+), 1984 deletions(-) delete mode 100644 crates/prune/db/Cargo.toml delete mode 100644 crates/prune/db/src/lib.rs create mode 100644 crates/static-file/static-file/src/segments/headers.rs create mode 100644 crates/static-file/static-file/src/segments/transactions.rs create mode 100644 crates/storage/errors/src/writer.rs diff --git a/Cargo.lock b/Cargo.lock index 4de4484f21c..ab0851a79ab 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4458,19 +4458,6 @@ dependencies = [ "webpki-roots 1.0.3", ] -[[package]] -name = "hyper-timeout" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" -dependencies = [ - "hyper", - "hyper-util", - "pin-project-lite", - "tokio", - "tower-service", -] - [[package]] name = "hyper-util" version = "0.1.17" @@ -6184,8 +6171,6 @@ dependencies = [ "prost", "reqwest", "thiserror 2.0.17", - "tokio", - "tonic", "tracing", ] @@ -8304,10 +8289,8 @@ dependencies = [ "reth-node-metrics", "reth-rpc-server-types", "reth-tracing", - "reth-tracing-otlp", "tempfile", "tracing", - "url", ] [[package]] @@ -9005,7 +8988,6 @@ dependencies = [ "reth-storage-api", "reth-storage-errors", "reth-tracing", - "reth-tracing-otlp", "reth-transaction-pool", "secp256k1 0.30.0", 
"serde", @@ -9260,13 +9242,11 @@ dependencies = [ "reth-static-file", "reth-static-file-types", "reth-tracing", - "reth-tracing-otlp", "serde", "tempfile", "tokio", "tokio-util", "tracing", - "url", ] [[package]] @@ -9442,7 +9422,6 @@ version = "1.8.2" dependencies = [ "alloy-consensus", "alloy-eips", - "alloy-evm", "alloy-primitives", "alloy-rlp", "alloy-rpc-types-debug", @@ -9819,10 +9798,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "reth-prune-db" -version = "1.8.2" - [[package]] name = "reth-prune-types" version = "1.8.2" @@ -10587,7 +10562,6 @@ dependencies = [ name = "reth-tracing-otlp" version = "1.8.2" dependencies = [ - "clap", "eyre", "opentelemetry", "opentelemetry-otlp", @@ -12591,15 +12565,10 @@ dependencies = [ "http", "http-body", "http-body-util", - "hyper", - "hyper-timeout", - "hyper-util", "percent-encoding", "pin-project", "sync_wrapper", - "tokio", "tokio-stream", - "tower", "tower-layer", "tower-service", "tracing", diff --git a/Cargo.toml b/Cargo.toml index 4e38992e7c3..4d79a7c08d0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -93,7 +93,6 @@ members = [ "crates/payload/util/", "crates/primitives-traits/", "crates/primitives/", - "crates/prune/db", "crates/prune/prune", "crates/prune/types", "crates/ress/protocol", @@ -479,14 +478,14 @@ revm-inspector = { version = "11.1.0", default-features = false } revm-context = { version = "10.1.0", default-features = false } revm-context-interface = { version = "11.1.0", default-features = false } revm-database-interface = { version = "8.0.1", default-features = false } -op-revm = { version = "11.2.0", default-features = false } +op-revm = { version = "11.1.0", default-features = false } revm-inspectors = "0.31.0" # eth alloy-chains = { version = "0.2.5", default-features = false } alloy-dyn-abi = "1.4.1" alloy-eip2124 = { version = "0.2.0", default-features = false } -alloy-evm = { version = "0.22.4", default-features = false } +alloy-evm = { version = "0.22.0", default-features = false } 
alloy-primitives = { version = "1.4.1", default-features = false, features = ["map-foldhash"] } alloy-rlp = { version = "0.3.10", default-features = false, features = ["core-net"] } alloy-sol-macro = "1.4.1" @@ -524,7 +523,7 @@ alloy-transport-ipc = { version = "1.0.41", default-features = false } alloy-transport-ws = { version = "1.0.41", default-features = false } # op -alloy-op-evm = { version = "0.22.4", default-features = false } +alloy-op-evm = { version = "0.22.0", default-features = false } alloy-op-hardforks = "0.4.0" op-alloy-rpc-types = { version = "0.21.0", default-features = false } op-alloy-rpc-types-engine = { version = "0.21.0", default-features = false } diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index 8eb815a9217..540c1c610a2 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -3,12 +3,7 @@ use alloy_evm::eth::spec::EthExecutorSpec; use crate::{ constants::{MAINNET_DEPOSIT_CONTRACT, MAINNET_PRUNE_DELETE_LIMIT}, - ethereum::SEPOLIA_PARIS_TTD, - holesky, hoodi, - mainnet::{MAINNET_PARIS_BLOCK, MAINNET_PARIS_TTD}, - sepolia, - sepolia::SEPOLIA_PARIS_BLOCK, - EthChainSpec, + holesky, hoodi, sepolia, EthChainSpec, }; use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_chains::{Chain, NamedChain}; @@ -116,7 +111,7 @@ pub static MAINNET: LazyLock> = LazyLock::new(|| { genesis, // paris_block_and_final_difficulty: Some(( - MAINNET_PARIS_BLOCK, + 15537394, U256::from(58_750_003_716_598_352_816_469u128), )), hardforks, @@ -143,10 +138,7 @@ pub static SEPOLIA: LazyLock> = LazyLock::new(|| { ), genesis, // - paris_block_and_final_difficulty: Some(( - SEPOLIA_PARIS_BLOCK, - U256::from(17_000_018_015_853_232u128), - )), + paris_block_and_final_difficulty: Some((1450409, U256::from(17_000_018_015_853_232u128))), hardforks, // https://sepolia.etherscan.io/tx/0x025ecbf81a2f1220da6285d1701dc89fb5a956b62562ee922e1a9efd73eb4b14 deposit_contract: Some(DepositContract::new( @@ -697,50 +689,26 @@ impl From 
for ChainSpec { // We expect no new networks to be configured with the merge, so we ignore the TTD field // and merge netsplit block from external genesis files. All existing networks that have // merged should have a static ChainSpec already (namely mainnet and sepolia). - let paris_block_and_final_difficulty = if let Some(ttd) = - genesis.config.terminal_total_difficulty - { - hardforks.push(( - EthereumHardfork::Paris.boxed(), - ForkCondition::TTD { - // NOTE: this will not work properly if the merge is not activated at - // genesis, and there is no merge netsplit block - activation_block_number: genesis - .config - .merge_netsplit_block - .or_else(|| { - // due to this limitation we can't determine the merge block, - // this is the case for perfnet testing for example - // at the time of this fix, only two networks transitioned: MAINNET + - // SEPOLIA and this parsing from genesis is used for shadowforking, so - // we can reasonably assume that if the TTD and the chainid matches - // those networks we use the activation - // blocks of those networks - match genesis.config.chain_id { - 1 => { - if ttd == MAINNET_PARIS_TTD { - return Some(MAINNET_PARIS_BLOCK) - } - } - 11155111 => { - if ttd == SEPOLIA_PARIS_TTD { - return Some(SEPOLIA_PARIS_BLOCK) - } - } - _ => {} - }; - None - }) - .unwrap_or_default(), - total_difficulty: ttd, - fork_block: genesis.config.merge_netsplit_block, - }, - )); + let paris_block_and_final_difficulty = + if let Some(ttd) = genesis.config.terminal_total_difficulty { + hardforks.push(( + EthereumHardfork::Paris.boxed(), + ForkCondition::TTD { + // NOTE: this will not work properly if the merge is not activated at + // genesis, and there is no merge netsplit block + activation_block_number: genesis + .config + .merge_netsplit_block + .unwrap_or_default(), + total_difficulty: ttd, + fork_block: genesis.config.merge_netsplit_block, + }, + )); - genesis.config.merge_netsplit_block.map(|block| (block, ttd)) - } else { - None - }; + 
genesis.config.merge_netsplit_block.map(|block| (block, ttd)) + } else { + None + }; // Time-based hardforks let time_hardfork_opts = [ @@ -2704,71 +2672,4 @@ Post-merge hard forks (timestamp based): }; assert_eq!(hardfork_params, expected); } - - #[test] - fn parse_perf_net_genesis() { - let s = r#"{ - "config": { - "chainId": 1, - "homesteadBlock": 1150000, - "daoForkBlock": 1920000, - "daoForkSupport": true, - "eip150Block": 2463000, - "eip150Hash": "0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0", - "eip155Block": 2675000, - "eip158Block": 2675000, - "byzantiumBlock": 4370000, - "constantinopleBlock": 7280000, - "petersburgBlock": 7280000, - "istanbulBlock": 9069000, - "muirGlacierBlock": 9200000, - "berlinBlock": 12244000, - "londonBlock": 12965000, - "arrowGlacierBlock": 13773000, - "grayGlacierBlock": 15050000, - "terminalTotalDifficulty": 58750000000000000000000, - "terminalTotalDifficultyPassed": true, - "shanghaiTime": 1681338455, - "cancunTime": 1710338135, - "pragueTime": 1746612311, - "ethash": {}, - "depositContractAddress": "0x00000000219ab540356cBB839Cbe05303d7705Fa", - "blobSchedule": { - "cancun": { - "target": 3, - "max": 6, - "baseFeeUpdateFraction": 3338477 - }, - "prague": { - "target": 6, - "max": 9, - "baseFeeUpdateFraction": 5007716 - } - } - }, - "nonce": "0x42", - "timestamp": "0x0", - "extraData": "0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa", - "gasLimit": "0x1388", - "difficulty": "0x400000000", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "coinbase": "0x0000000000000000000000000000000000000000", - "number": "0x0", - "gasUsed": "0x0", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "baseFeePerGas": null -}"#; - - let genesis = serde_json::from_str::(s).unwrap(); - let chainspec = ChainSpec::from_genesis(genesis); - let activation = chainspec.hardforks.fork(EthereumHardfork::Paris); - assert_eq!( - activation, - 
ForkCondition::TTD { - activation_block_number: MAINNET_PARIS_BLOCK, - total_difficulty: MAINNET_PARIS_TTD, - fork_block: None, - } - ) - } } diff --git a/crates/cli/commands/src/p2p/mod.rs b/crates/cli/commands/src/p2p/mod.rs index c72ceca78e6..792d4533856 100644 --- a/crates/cli/commands/src/p2p/mod.rs +++ b/crates/cli/commands/src/p2p/mod.rs @@ -192,7 +192,6 @@ impl DownloadArgs { let net = NetworkConfigBuilder::::new(p2p_secret_key) .peer_config(config.peers_config_with_basic_nodes_from_file(None)) .external_ip_resolver(self.network.nat) - .network_id(self.network.network_id) .boot_nodes(boot_nodes.clone()) .apply(|builder| { self.network.discovery.apply_to_builder(builder, rlpx_socket, boot_nodes) diff --git a/crates/consensus/consensus/src/noop.rs b/crates/consensus/consensus/src/noop.rs index 3d6818ca306..259fae27d67 100644 --- a/crates/consensus/consensus/src/noop.rs +++ b/crates/consensus/consensus/src/noop.rs @@ -1,32 +1,9 @@ -//! A consensus implementation that does nothing. -//! -//! This module provides `NoopConsensus`, a consensus implementation that performs no validation -//! and always returns `Ok(())` for all validation methods. Useful for testing and scenarios -//! where consensus validation is not required. -//! -//! # Examples -//! -//! ```rust -//! use reth_consensus::noop::NoopConsensus; -//! use std::sync::Arc; -//! -//! let consensus = NoopConsensus::default(); -//! let consensus_arc = NoopConsensus::arc(); -//! ``` -//! -//! # Warning -//! -//! **Not for production use** - provides no security guarantees or consensus validation. - use crate::{Consensus, ConsensusError, FullConsensus, HeaderValidator}; use alloc::sync::Arc; use reth_execution_types::BlockExecutionResult; use reth_primitives_traits::{Block, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader}; /// A Consensus implementation that does nothing. -/// -/// Always returns `Ok(())` for all validation methods. 
Suitable for testing and scenarios -/// where consensus validation is not required. #[derive(Debug, Copy, Clone, Default)] #[non_exhaustive] pub struct NoopConsensus; @@ -39,12 +16,10 @@ impl NoopConsensus { } impl HeaderValidator for NoopConsensus { - /// Validates a header (no-op implementation). fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { Ok(()) } - /// Validates a header against its parent (no-op implementation). fn validate_header_against_parent( &self, _header: &SealedHeader, @@ -57,7 +32,6 @@ impl HeaderValidator for NoopConsensus { impl Consensus for NoopConsensus { type Error = ConsensusError; - /// Validates body against header (no-op implementation). fn validate_body_against_header( &self, _body: &B::Body, @@ -66,14 +40,12 @@ impl Consensus for NoopConsensus { Ok(()) } - /// Validates block before execution (no-op implementation). fn validate_block_pre_execution(&self, _block: &SealedBlock) -> Result<(), Self::Error> { Ok(()) } } impl FullConsensus for NoopConsensus { - /// Validates block after execution (no-op implementation). 
fn validate_block_post_execution( &self, _block: &RecoveredBlock, diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 57d03f70fa5..e7b83cb3ad9 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -8,8 +8,8 @@ use reth_network_api::test_utils::PeersHandleProvider; use reth_node_builder::{ components::NodeComponentsBuilder, rpc::{EngineValidatorAddOn, RethRpcAddOns}, - FullNodeTypesAdapter, Node, NodeAdapter, NodeComponents, NodeTypes, NodeTypesWithDBAdapter, - PayloadAttributesBuilder, PayloadTypes, + FullNodeTypesAdapter, Node, NodeAdapter, NodeComponents, NodePrimitives, NodeTypes, + NodeTypesWithDBAdapter, PayloadAttributesBuilder, PayloadTypes, }; use reth_provider::providers::{BlockchainProvider, NodeTypesForProvider}; use reth_tasks::TaskManager; @@ -146,6 +146,12 @@ where >, > + Node< TmpNodeAdapter>>, + Primitives: NodePrimitives< + BlockHeader = alloy_consensus::Header, + BlockBody = alloy_consensus::BlockBody< + ::SignedTx, + >, + >, ComponentsBuilder: NodeComponentsBuilder< TmpNodeAdapter>>, Components: NodeComponents< @@ -174,6 +180,12 @@ where >, > + Node< TmpNodeAdapter>>, + Primitives: NodePrimitives< + BlockHeader = alloy_consensus::Header, + BlockBody = alloy_consensus::BlockBody< + ::SignedTx, + >, + >, ComponentsBuilder: NodeComponentsBuilder< TmpNodeAdapter>>, Components: NodeComponents< diff --git a/crates/engine/tree/src/tree/cached_state.rs b/crates/engine/tree/src/tree/cached_state.rs index bc543d067a0..c1bb028cab2 100644 --- a/crates/engine/tree/src/tree/cached_state.rs +++ b/crates/engine/tree/src/tree/cached_state.rs @@ -1,5 +1,8 @@ //! Execution cache implementation for block processing. 
-use alloy_primitives::{Address, StorageKey, StorageValue, B256}; +use alloy_primitives::{ + map::{DefaultHashBuilder, HashSet}, + Address, StorageKey, StorageValue, B256, +}; use metrics::Gauge; use mini_moka::sync::CacheBuilder; use reth_errors::ProviderResult; @@ -14,7 +17,6 @@ use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, MultiProofTargets, StorageMultiProof, StorageProof, TrieInput, }; -use revm_primitives::map::DefaultHashBuilder; use std::{sync::Arc, time::Duration}; use tracing::{debug_span, instrument, trace}; @@ -300,65 +302,70 @@ pub(crate) struct ExecutionCache { /// Cache for contract bytecode, keyed by code hash. code_cache: Cache>, - /// Per-account storage cache: outer cache keyed by Address, inner cache tracks that account’s - /// storage slots. - storage_cache: Cache, + /// Flattened storage cache: composite key of (`Address`, `StorageKey`) maps directly to + /// values. + storage_cache: Cache<(Address, StorageKey), Option>, /// Cache for basic account information (nonce, balance, code hash). account_cache: Cache>, } impl ExecutionCache { - /// Get storage value from hierarchical cache. + /// Get storage value from flattened cache. 
/// /// Returns a `SlotStatus` indicating whether: - /// - `NotCached`: The account's storage cache doesn't exist - /// - `Empty`: The slot exists in the account's cache but is empty + /// - `NotCached`: The storage slot is not in the cache + /// - `Empty`: The slot exists in the cache but is empty /// - `Value`: The slot exists and has a specific value pub(crate) fn get_storage(&self, address: &Address, key: &StorageKey) -> SlotStatus { - match self.storage_cache.get(address) { + match self.storage_cache.get(&(*address, *key)) { None => SlotStatus::NotCached, - Some(account_cache) => account_cache.get_storage(key), + Some(None) => SlotStatus::Empty, + Some(Some(value)) => SlotStatus::Value(value), } } - /// Insert storage value into hierarchical cache + /// Insert storage value into flattened cache pub(crate) fn insert_storage( &self, address: Address, key: StorageKey, value: Option, ) { - self.insert_storage_bulk(address, [(key, value)]); + self.storage_cache.insert((address, key), value); } - /// Insert multiple storage values into hierarchical cache for a single account + /// Insert multiple storage values into flattened cache for a single account /// - /// This method is optimized for inserting multiple storage values for the same address - /// by doing the account cache lookup only once instead of for each key-value pair. + /// This method inserts multiple storage values for the same address directly + /// into the flattened cache. 
pub(crate) fn insert_storage_bulk(&self, address: Address, storage_entries: I) where I: IntoIterator)>, { - let account_cache = self.storage_cache.get(&address).unwrap_or_else(|| { - let account_cache = AccountStorageCache::default(); - self.storage_cache.insert(address, account_cache.clone()); - account_cache - }); - for (key, value) in storage_entries { - account_cache.insert_storage(key, value); + self.storage_cache.insert((address, key), value); } } - /// Invalidate storage for specific account - pub(crate) fn invalidate_account_storage(&self, address: &Address) { - self.storage_cache.invalidate(address); - } - /// Returns the total number of storage slots cached across all accounts pub(crate) fn total_storage_slots(&self) -> usize { - self.storage_cache.iter().map(|addr| addr.len()).sum() + self.storage_cache.entry_count() as usize + } + + /// Invalidates the storage for all addresses in the set + #[instrument(level = "debug", target = "engine::caching", skip_all, fields(accounts = addresses.len()))] + pub(crate) fn invalidate_storages(&self, addresses: HashSet<&Address>) { + // NOTE: this must collect because the invalidate function should not be called while we + // hold an iter for it + let storage_entries = self + .storage_cache + .iter() + .filter_map(|entry| addresses.contains(&entry.key().0).then_some(*entry.key())) + .collect::>(); + for key in storage_entries { + self.storage_cache.invalidate(&key) + } } /// Inserts the post-execution state changes into the cache. @@ -398,6 +405,7 @@ impl ExecutionCache { state_updates.state.values().map(|account| account.storage.len()).sum::() ) .entered(); + let mut invalidated_accounts = HashSet::default(); for (addr, account) in &state_updates.state { // If the account was not modified, as in not changed and not destroyed, then we have // nothing to do w.r.t. 
this particular account and can move on @@ -410,7 +418,7 @@ impl ExecutionCache { // Invalidate the account cache entry if destroyed self.account_cache.invalidate(addr); - self.invalidate_account_storage(addr); + invalidated_accounts.insert(addr); continue } @@ -437,6 +445,9 @@ impl ExecutionCache { self.account_cache.insert(*addr, Some(Account::from(account_info))); } + // invalidate storage for all destroyed accounts + self.invalidate_storages(invalidated_accounts); + Ok(()) } } @@ -465,11 +476,11 @@ impl ExecutionCacheBuilder { const TIME_TO_IDLE: Duration = Duration::from_secs(3600); // 1 hour let storage_cache = CacheBuilder::new(self.storage_cache_entries) - .weigher(|_key: &Address, value: &AccountStorageCache| -> u32 { - // values based on results from measure_storage_cache_overhead test - let base_weight = 39_000; - let slots_weight = value.len() * 218; - (base_weight + slots_weight) as u32 + .weigher(|_key: &(Address, StorageKey), _value: &Option| -> u32 { + // Size of composite key (Address + StorageKey) + Option + // Address: 20 bytes, StorageKey: 32 bytes, Option: 33 bytes + // Plus some overhead for the hash map entry + 120_u32 }) .max_capacity(storage_cache_size) .time_to_live(EXPIRY_TIME) @@ -592,56 +603,6 @@ impl SavedCache { } } -/// Cache for an individual account's storage slots. -/// -/// This represents the second level of the hierarchical storage cache. -/// Each account gets its own `AccountStorageCache` to store accessed storage slots. -#[derive(Debug, Clone)] -pub(crate) struct AccountStorageCache { - /// Map of storage keys to their cached values. - slots: Cache>, -} - -impl AccountStorageCache { - /// Create a new [`AccountStorageCache`] - pub(crate) fn new(max_slots: u64) -> Self { - Self { - slots: CacheBuilder::new(max_slots).build_with_hasher(DefaultHashBuilder::default()), - } - } - - /// Get a storage value from this account's cache. 
- /// - `NotCached`: The slot is not in the cache - /// - `Empty`: The slot is empty - /// - `Value`: The slot has a specific value - pub(crate) fn get_storage(&self, key: &StorageKey) -> SlotStatus { - match self.slots.get(key) { - None => SlotStatus::NotCached, - Some(None) => SlotStatus::Empty, - Some(Some(value)) => SlotStatus::Value(value), - } - } - - /// Insert a storage value - pub(crate) fn insert_storage(&self, key: StorageKey, value: Option) { - self.slots.insert(key, value); - } - - /// Returns the number of slots in the cache - pub(crate) fn len(&self) -> usize { - self.slots.entry_count() as usize - } -} - -impl Default for AccountStorageCache { - fn default() -> Self { - // With weigher and max_capacity in place, this number represents - // the maximum number of entries that can be stored, not the actual - // memory usage which is controlled by storage cache's max_capacity. - Self::new(1_000_000) - } -} - #[cfg(test)] mod tests { use super::*; @@ -716,32 +677,36 @@ mod tests { #[test] fn measure_storage_cache_overhead() { - let (base_overhead, cache) = measure_allocation(|| AccountStorageCache::new(1000)); - println!("Base AccountStorageCache overhead: {base_overhead} bytes"); + let (base_overhead, cache) = + measure_allocation(|| ExecutionCacheBuilder::default().build_caches(1000)); + println!("Base ExecutionCache overhead: {base_overhead} bytes"); let mut rng = rand::rng(); + let address = Address::random(); let key = StorageKey::random(); let value = StorageValue::from(rng.random::()); let (first_slot, _) = measure_allocation(|| { - cache.insert_storage(key, Some(value)); + cache.insert_storage(address, key, Some(value)); }); println!("First slot insertion overhead: {first_slot} bytes"); const TOTAL_SLOTS: usize = 10_000; let (test_slots, _) = measure_allocation(|| { for _ in 0..TOTAL_SLOTS { + let addr = Address::random(); let key = StorageKey::random(); let value = StorageValue::from(rng.random::()); - cache.insert_storage(key, Some(value)); + 
cache.insert_storage(addr, key, Some(value)); } }); println!("Average overhead over {} slots: {} bytes", TOTAL_SLOTS, test_slots / TOTAL_SLOTS); println!("\nTheoretical sizes:"); + println!("Address size: {} bytes", size_of::
()); println!("StorageKey size: {} bytes", size_of::()); println!("StorageValue size: {} bytes", size_of::()); println!("Option size: {} bytes", size_of::>()); - println!("Option size: {} bytes", size_of::>()); + println!("(Address, StorageKey) size: {} bytes", size_of::<(Address, StorageKey)>()); } #[test] diff --git a/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs index 9e8f787823a..90e8928dba2 100644 --- a/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs +++ b/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs @@ -186,18 +186,4 @@ impl SparseTrieInterface for ConfiguredSparseTrie { Self::Parallel(trie) => trie.value_capacity(), } } - - fn shrink_nodes_to(&mut self, size: usize) { - match self { - Self::Serial(trie) => trie.shrink_nodes_to(size), - Self::Parallel(trie) => trie.shrink_nodes_to(size), - } - } - - fn shrink_values_to(&mut self, size: usize) { - match self { - Self::Serial(trie) => trie.shrink_values_to(size), - Self::Parallel(trie) => trie.shrink_values_to(size), - } - } } diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index 508973b222e..363358bf3d4 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -66,29 +66,6 @@ use configured_sparse_trie::ConfiguredSparseTrie; pub const PARALLEL_SPARSE_TRIE_PARALLELISM_THRESHOLDS: ParallelismThresholds = ParallelismThresholds { min_revealed_nodes: 100, min_updated_nodes: 100 }; -/// Default node capacity for shrinking the sparse trie. This is used to limit the number of trie -/// nodes in allocated sparse tries. -/// -/// Node maps have a key of `Nibbles` and value of `SparseNode`. -/// The `size_of::` is 40, and `size_of::` is 80. 
-/// -/// If we have 1 million entries of 120 bytes each, this conservative estimate comes out at around -/// 120MB. -pub const SPARSE_TRIE_MAX_NODES_SHRINK_CAPACITY: usize = 1_000_000; - -/// Default value capacity for shrinking the sparse trie. This is used to limit the number of values -/// in allocated sparse tries. -/// -/// There are storage and account values, the largest of the two being account values, which are -/// essentially `TrieAccount`s. -/// -/// Account value maps have a key of `Nibbles` and value of `TrieAccount`. -/// The `size_of::` is 40, and `size_of::` is 104. -/// -/// If we have 1 million entries of 144 bytes each, this conservative estimate comes out at around -/// 144MB. -pub const SPARSE_TRIE_MAX_VALUES_SHRINK_CAPACITY: usize = 1_000_000; - /// Entrypoint for executing the payload. #[derive(Debug)] pub struct PayloadProcessor @@ -462,19 +439,11 @@ where // Send state root computation result let _ = state_root_tx.send(result); - // Clear the SparseStateTrie, shrink, and replace it back into the mutex _after_ sending + // Clear the SparseStateTrie and replace it back into the mutex _after_ sending // results to the next step, so that time spent clearing doesn't block the step after // this one. let _enter = debug_span!(target: "engine::tree::payload_processor", "clear").entered(); - let mut cleared_trie = ClearedSparseStateTrie::from_state_trie(trie); - - // Shrink the sparse trie so that we don't have ever increasing memory. 
- cleared_trie.shrink_to( - SPARSE_TRIE_MAX_NODES_SHRINK_CAPACITY, - SPARSE_TRIE_MAX_VALUES_SHRINK_CAPACITY, - ); - - cleared_sparse_trie.lock().replace(cleared_trie); + cleared_sparse_trie.lock().replace(ClearedSparseStateTrie::from_state_trie(trie)); }); } } diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index 9f136a48125..1e5b226f591 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -24,7 +24,7 @@ use reth_trie_parallel::{ root::ParallelStateRootError, }; use std::{ - collections::BTreeMap, + collections::{BTreeMap, VecDeque}, ops::DerefMut, sync::{ mpsc::{channel, Receiver, Sender}, @@ -34,6 +34,10 @@ use std::{ }; use tracing::{debug, error, instrument, trace}; +/// Default upper bound for inflight multiproof calculations. These would be sitting in the queue +/// waiting to be processed. +const DEFAULT_MULTIPROOF_INFLIGHT_LIMIT: usize = 128; + /// A trie update that can be applied to sparse trie alongside the proofs for touched parts of the /// state. #[derive(Default, Debug)] @@ -214,7 +218,7 @@ pub(crate) fn evm_state_to_hashed_post_state(update: EvmState) -> HashedPostStat for (address, account) in update { if account.is_touched() { let hashed_address = keccak256(address); - trace!(target: "engine::tree::payload_processor::multiproof", ?address, ?hashed_address, "Adding account to state update"); + trace!(target: "engine::root", ?address, ?hashed_address, "Adding account to state update"); let destroyed = account.is_selfdestructed(); let info = if destroyed { None } else { Some(account.info.into()) }; @@ -333,10 +337,17 @@ impl MultiproofInput { } /// Manages concurrent multiproof calculations. 
+/// Takes care of not having more calculations in flight than a given maximum +/// concurrency, further calculation requests are queued and spawn later, after +/// availability has been signaled. #[derive(Debug)] pub struct MultiproofManager { + /// Maximum number of proof calculations allowed to be inflight at once. + inflight_limit: usize, /// Currently running calculations. inflight: usize, + /// Queued calculations. + pending: VecDeque, /// Executor for tasks executor: WorkloadExecutor, /// Handle to the proof worker pools (storage and account). @@ -365,16 +376,22 @@ impl MultiproofManager { proof_worker_handle: ProofWorkerHandle, ) -> Self { Self { - inflight: 0, + pending: VecDeque::with_capacity(DEFAULT_MULTIPROOF_INFLIGHT_LIMIT), + inflight_limit: DEFAULT_MULTIPROOF_INFLIGHT_LIMIT, executor, + inflight: 0, metrics, proof_worker_handle, missed_leaves_storage_roots: Default::default(), } } - /// Spawns a new multiproof calculation. - fn spawn(&mut self, input: PendingMultiproofTask) { + const fn is_full(&self) -> bool { + self.inflight >= self.inflight_limit + } + + /// Spawns a new multiproof calculation or enqueues it if the inflight limit is reached. + fn spawn_or_queue(&mut self, input: PendingMultiproofTask) { // If there are no proof targets, we can just send an empty multiproof back immediately if input.proof_targets_is_empty() { debug!( @@ -385,9 +402,27 @@ impl MultiproofManager { return } + if self.is_full() { + self.pending.push_back(input); + self.metrics.pending_multiproofs_histogram.record(self.pending.len() as f64); + return; + } + self.spawn_multiproof_task(input); } + /// Signals that a multiproof calculation has finished and there's room to + /// spawn a new calculation if needed. 
+ fn on_calculation_complete(&mut self) { + self.inflight = self.inflight.saturating_sub(1); + self.metrics.inflight_multiproofs_histogram.record(self.inflight as f64); + + if let Some(input) = self.pending.pop_front() { + self.metrics.pending_multiproofs_histogram.record(self.pending.len() as f64); + self.spawn_multiproof_task(input); + } + } + /// Spawns a multiproof task, dispatching to `spawn_storage_proof` if the input is a storage /// multiproof, and dispatching to `spawn_multiproof` otherwise. fn spawn_multiproof_task(&mut self, input: PendingMultiproofTask) { @@ -421,7 +456,7 @@ impl MultiproofManager { let storage_targets = proof_targets.len(); trace!( - target: "engine::tree::payload_processor::multiproof", + target: "engine::root", proof_sequence_number, ?proof_targets, storage_targets, @@ -440,7 +475,7 @@ impl MultiproofManager { .storage_proof(hashed_address, proof_targets); let elapsed = start.elapsed(); trace!( - target: "engine::tree::payload_processor::multiproof", + target: "engine::root", proof_sequence_number, ?elapsed, ?source, @@ -473,24 +508,6 @@ impl MultiproofManager { self.inflight += 1; self.metrics.inflight_multiproofs_histogram.record(self.inflight as f64); - self.metrics - .pending_storage_multiproofs_histogram - .record(self.proof_worker_handle.pending_storage_tasks() as f64); - self.metrics - .pending_account_multiproofs_histogram - .record(self.proof_worker_handle.pending_account_tasks() as f64); - } - - /// Signals that a multiproof calculation has finished. - fn on_calculation_complete(&mut self) { - self.inflight = self.inflight.saturating_sub(1); - self.metrics.inflight_multiproofs_histogram.record(self.inflight as f64); - self.metrics - .pending_storage_multiproofs_histogram - .record(self.proof_worker_handle.pending_storage_tasks() as f64); - self.metrics - .pending_account_multiproofs_histogram - .record(self.proof_worker_handle.pending_account_tasks() as f64); } /// Spawns a single multiproof calculation task. 
@@ -512,7 +529,7 @@ impl MultiproofManager { let storage_targets = proof_targets.values().map(|slots| slots.len()).sum::(); trace!( - target: "engine::tree::payload_processor::multiproof", + target: "engine::root", proof_sequence_number, ?proof_targets, account_targets, @@ -550,7 +567,7 @@ impl MultiproofManager { })(); let elapsed = start.elapsed(); trace!( - target: "engine::tree::payload_processor::multiproof", + target: "engine::root", proof_sequence_number, ?elapsed, ?source, @@ -581,12 +598,6 @@ impl MultiproofManager { self.inflight += 1; self.metrics.inflight_multiproofs_histogram.record(self.inflight as f64); - self.metrics - .pending_storage_multiproofs_histogram - .record(self.proof_worker_handle.pending_storage_tasks() as f64); - self.metrics - .pending_account_multiproofs_histogram - .record(self.proof_worker_handle.pending_account_tasks() as f64); } } @@ -595,10 +606,8 @@ impl MultiproofManager { pub(crate) struct MultiProofTaskMetrics { /// Histogram of inflight multiproofs. pub inflight_multiproofs_histogram: Histogram, - /// Histogram of pending storage multiproofs in the queue. - pub pending_storage_multiproofs_histogram: Histogram, - /// Histogram of pending account multiproofs in the queue. - pub pending_account_multiproofs_histogram: Histogram, + /// Histogram of pending multiproofs. + pub pending_multiproofs_histogram: Histogram, /// Histogram of the number of prefetch proof target accounts. pub prefetch_proof_targets_accounts_histogram: Histogram, @@ -648,7 +657,8 @@ pub(crate) struct MultiProofTaskMetrics { #[derive(Debug)] pub(super) struct MultiProofTask { /// The size of proof targets chunk to spawn in one calculation. - /// If None, chunking is disabled and all targets are processed in a single proof. + /// + /// If [`None`], then chunking is disabled. chunk_size: Option, /// Task configuration. config: MultiProofConfig, @@ -728,14 +738,10 @@ impl MultiProofTask { // Process proof targets in chunks. 
let mut chunks = 0; - - // Only chunk if account or storage workers are available to take advantage of parallelism. - let should_chunk = - self.multiproof_manager.proof_worker_handle.has_available_account_workers() || - self.multiproof_manager.proof_worker_handle.has_available_storage_workers(); + let should_chunk = !self.multiproof_manager.is_full(); let mut spawn = |proof_targets| { - self.multiproof_manager.spawn( + self.multiproof_manager.spawn_or_queue( MultiproofInput { config: self.config.clone(), source: None, @@ -775,7 +781,7 @@ impl MultiProofTask { proofs_processed >= state_update_proofs_requested + prefetch_proofs_requested; let no_pending = !self.proof_sequencer.has_pending(); trace!( - target: "engine::tree::payload_processor::multiproof", + target: "engine::root", proofs_processed, state_update_proofs_requested, prefetch_proofs_requested, @@ -830,7 +836,7 @@ impl MultiProofTask { } if duplicates > 0 { - trace!(target: "engine::tree::payload_processor::multiproof", duplicates, "Removed duplicate prefetch proof targets"); + trace!(target: "engine::root", duplicates, "Removed duplicate prefetch proof targets"); } targets @@ -867,14 +873,10 @@ impl MultiProofTask { // Process state updates in chunks. let mut chunks = 0; + let should_chunk = !self.multiproof_manager.is_full(); let mut spawned_proof_targets = MultiProofTargets::default(); - // Only chunk if account or storage workers are available to take advantage of parallelism. 
- let should_chunk = - self.multiproof_manager.proof_worker_handle.has_available_account_workers() || - self.multiproof_manager.proof_worker_handle.has_available_storage_workers(); - let mut spawn = |hashed_state_update| { let proof_targets = get_proof_targets( &hashed_state_update, @@ -883,7 +885,7 @@ impl MultiProofTask { ); spawned_proof_targets.extend_ref(&proof_targets); - self.multiproof_manager.spawn( + self.multiproof_manager.spawn_or_queue( MultiproofInput { config: self.config.clone(), source: Some(source), @@ -952,7 +954,7 @@ impl MultiProofTask { /// so that the proofs for accounts and storage slots that were already fetched are not /// requested again. /// 2. Using the proof targets, a new multiproof is calculated using - /// [`MultiproofManager::spawn`]. + /// [`MultiproofManager::spawn_or_queue`]. /// * If the list of proof targets is empty, the [`MultiProofMessage::EmptyProof`] message is /// sent back to this task along with the original state update. /// * Otherwise, the multiproof is calculated and the [`MultiProofMessage::ProofCalculated`] @@ -996,18 +998,18 @@ impl MultiProofTask { let mut updates_finished_time = None; loop { - trace!(target: "engine::tree::payload_processor::multiproof", "entering main channel receiving loop"); + trace!(target: "engine::root", "entering main channel receiving loop"); match self.rx.recv() { Ok(message) => match message { MultiProofMessage::PrefetchProofs(targets) => { - trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::PrefetchProofs"); + trace!(target: "engine::root", "processing MultiProofMessage::PrefetchProofs"); if first_update_time.is_none() { // record the wait time self.metrics .first_update_wait_time_histogram .record(start.elapsed().as_secs_f64()); first_update_time = Some(Instant::now()); - debug!(target: "engine::tree::payload_processor::multiproof", "Started state root calculation"); + debug!(target: "engine::root", "Started state root calculation"); } let 
account_targets = targets.len(); @@ -1015,7 +1017,7 @@ impl MultiProofTask { targets.values().map(|slots| slots.len()).sum::(); prefetch_proofs_requested += self.on_prefetch_proof(targets); trace!( - target: "engine::tree::payload_processor::multiproof", + target: "engine::root", account_targets, storage_targets, prefetch_proofs_requested, @@ -1023,20 +1025,20 @@ impl MultiProofTask { ); } MultiProofMessage::StateUpdate(source, update) => { - trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::StateUpdate"); + trace!(target: "engine::root", "processing MultiProofMessage::StateUpdate"); if first_update_time.is_none() { // record the wait time self.metrics .first_update_wait_time_histogram .record(start.elapsed().as_secs_f64()); first_update_time = Some(Instant::now()); - debug!(target: "engine::tree::payload_processor::multiproof", "Started state root calculation"); + debug!(target: "engine::root", "Started state root calculation"); } let len = update.len(); state_update_proofs_requested += self.on_state_update(source, update); trace!( - target: "engine::tree::payload_processor::multiproof", + target: "engine::root", ?source, len, ?state_update_proofs_requested, @@ -1044,7 +1046,7 @@ impl MultiProofTask { ); } MultiProofMessage::FinishedStateUpdates => { - trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::FinishedStateUpdates"); + trace!(target: "engine::root", "processing MultiProofMessage::FinishedStateUpdates"); updates_finished = true; updates_finished_time = Some(Instant::now()); if self.is_done( @@ -1054,14 +1056,14 @@ impl MultiProofTask { updates_finished, ) { debug!( - target: "engine::tree::payload_processor::multiproof", + target: "engine::root", "State updates finished and all proofs processed, ending calculation" ); break } } MultiProofMessage::EmptyProof { sequence_number, state } => { - trace!(target: "engine::tree::payload_processor::multiproof", "processing 
MultiProofMessage::EmptyProof"); + trace!(target: "engine::root", "processing MultiProofMessage::EmptyProof"); proofs_processed += 1; @@ -1079,14 +1081,14 @@ impl MultiProofTask { updates_finished, ) { debug!( - target: "engine::tree::payload_processor::multiproof", + target: "engine::root", "State updates finished and all proofs processed, ending calculation" ); break } } MultiProofMessage::ProofCalculated(proof_calculated) => { - trace!(target: "engine::tree::payload_processor::multiproof", "processing + trace!(target: "engine::root", "processing MultiProofMessage::ProofCalculated"); // we increment proofs_processed for both state updates and prefetches, @@ -1098,7 +1100,7 @@ impl MultiProofTask { .record(proof_calculated.elapsed); trace!( - target: "engine::tree::payload_processor::multiproof", + target: "engine::root", sequence = proof_calculated.sequence_number, total_proofs = proofs_processed, "Processing calculated proof" @@ -1119,14 +1121,14 @@ impl MultiProofTask { updates_finished, ) { debug!( - target: "engine::tree::payload_processor::multiproof", + target: "engine::root", "State updates finished and all proofs processed, ending calculation"); break } } MultiProofMessage::ProofCalculationError(err) => { error!( - target: "engine::tree::payload_processor::multiproof", + target: "engine::root", ?err, "proof calculation error" ); @@ -1136,14 +1138,14 @@ impl MultiProofTask { Err(_) => { // this means our internal message channel is closed, which shouldn't happen // in normal operation since we hold both ends - error!(target: "engine::tree::payload_processor::multiproof", "Internal message channel closed unexpectedly"); + error!(target: "engine::root", "Internal message channel closed unexpectedly"); return } } } debug!( - target: "engine::tree::payload_processor::multiproof", + target: "engine::root", total_updates = state_update_proofs_requested, total_proofs = proofs_processed, total_time = ?first_update_time.map(|t|t.elapsed()), diff --git 
a/crates/engine/tree/src/tree/payload_processor/prewarm.rs b/crates/engine/tree/src/tree/payload_processor/prewarm.rs index abc3bd58351..134233233ee 100644 --- a/crates/engine/tree/src/tree/payload_processor/prewarm.rs +++ b/crates/engine/tree/src/tree/payload_processor/prewarm.rs @@ -106,7 +106,7 @@ where let (actions_tx, actions_rx) = channel(); trace!( - target: "engine::tree::payload_processor::prewarm", + target: "engine::tree::prewarm", max_concurrency, transaction_count_hint, "Initialized prewarm task" @@ -185,7 +185,7 @@ where for handle in &handles { if let Err(err) = handle.send(indexed_tx.clone()) { warn!( - target: "engine::tree::payload_processor::prewarm", + target: "engine::tree::prewarm", tx_hash = %first_tx_hash, error = %err, "Failed to send deposit transaction to worker" @@ -196,7 +196,7 @@ where // Not a deposit, send to first worker via round-robin if let Err(err) = handles[0].send(indexed_tx) { warn!( - target: "engine::tree::payload_processor::prewarm", + target: "engine::tree::prewarm", task_idx = 0, error = %err, "Failed to send transaction to worker" @@ -213,7 +213,7 @@ where let task_idx = executing % workers_needed; if let Err(err) = handles[task_idx].send(indexed_tx) { warn!( - target: "engine::tree::payload_processor::prewarm", + target: "engine::tree::prewarm", task_idx, error = %err, "Failed to send transaction to worker" @@ -329,7 +329,7 @@ where self.send_multi_proof_targets(proof_targets); } PrewarmTaskEvent::Terminate { block_output } => { - trace!(target: "engine::tree::payload_processor::prewarm", "Received termination signal"); + trace!(target: "engine::tree::prewarm", "Received termination signal"); final_block_output = Some(block_output); if finished_execution { @@ -338,7 +338,7 @@ where } } PrewarmTaskEvent::FinishedTxExecution { executed_transactions } => { - trace!(target: "engine::tree::payload_processor::prewarm", "Finished prewarm execution signal"); + trace!(target: "engine::tree::prewarm", "Finished prewarm execution 
signal"); self.ctx.metrics.transactions.set(executed_transactions as f64); self.ctx.metrics.transactions_histogram.record(executed_transactions as f64); @@ -352,7 +352,7 @@ where } } - debug!(target: "engine::tree::payload_processor::prewarm", "Completed prewarm execution"); + debug!(target: "engine::tree::prewarm", "Completed prewarm execution"); // save caches and finish if let Some(Some(state)) = final_block_output { @@ -488,7 +488,7 @@ where Ok(res) => res, Err(err) => { trace!( - target: "engine::tree::payload_processor::prewarm", + target: "engine::tree::prewarm", %err, tx_hash=%tx.tx().tx_hash(), sender=%tx.signer(), diff --git a/crates/ethereum/cli/Cargo.toml b/crates/ethereum/cli/Cargo.toml index 5dbb8bf4cd3..e232ea0cdb1 100644 --- a/crates/ethereum/cli/Cargo.toml +++ b/crates/ethereum/cli/Cargo.toml @@ -23,13 +23,11 @@ reth-node-ethereum.workspace = true reth-node-metrics.workspace = true reth-rpc-server-types.workspace = true reth-tracing.workspace = true -reth-tracing-otlp.workspace = true reth-node-api.workspace = true # misc clap.workspace = true eyre.workspace = true -url.workspace = true tracing.workspace = true [dev-dependencies] diff --git a/crates/ethereum/cli/src/app.rs b/crates/ethereum/cli/src/app.rs index b947d6df1db..ab3682be6dc 100644 --- a/crates/ethereum/cli/src/app.rs +++ b/crates/ethereum/cli/src/app.rs @@ -14,10 +14,8 @@ use reth_node_ethereum::{consensus::EthBeaconConsensus, EthEvmConfig, EthereumNo use reth_node_metrics::recorder::install_prometheus_recorder; use reth_rpc_server_types::RpcModuleValidator; use reth_tracing::{FileWorkerGuard, Layers}; -use reth_tracing_otlp::OtlpProtocol; use std::{fmt, sync::Arc}; use tracing::info; -use url::Url; /// A wrapper around a parsed CLI that handles command execution. 
#[derive(Debug)] @@ -98,8 +96,7 @@ where self.cli.logs.log_file_directory.join(chain_spec.chain().to_string()); } - self.init_tracing(&runner)?; - + self.init_tracing()?; // Install the prometheus recorder to be sure to record all metrics let _ = install_prometheus_recorder(); @@ -109,19 +106,18 @@ where /// Initializes tracing with the configured options. /// /// If file logging is enabled, this function stores guard to the struct. - /// For gRPC OTLP, it requires tokio runtime context. - pub fn init_tracing(&mut self, runner: &CliRunner) -> Result<()> { + pub fn init_tracing(&mut self) -> Result<()> { if self.guard.is_none() { let mut layers = self.layers.take().unwrap_or_default(); #[cfg(feature = "otlp")] - { - self.cli.traces.validate()?; - - if let Some(endpoint) = &self.cli.traces.otlp { - info!(target: "reth::cli", "Starting OTLP tracing export to {:?}", endpoint); - self.init_otlp_export(&mut layers, endpoint, runner)?; - } + if let Some(output_type) = &self.cli.traces.otlp { + info!(target: "reth::cli", "Starting OTLP tracing export to {:?}", output_type); + layers.with_span_layer( + "reth".to_string(), + output_type.clone(), + self.cli.traces.otlp_filter.clone(), + )?; } self.guard = self.cli.logs.init_tracing_with_layers(layers)?; @@ -129,35 +125,6 @@ where } Ok(()) } - - /// Initialize OTLP tracing export based on protocol type. - /// - /// For gRPC, `block_on` is required because tonic's channel initialization needs - /// a tokio runtime context, even though `with_span_layer` itself is not async. 
- #[cfg(feature = "otlp")] - fn init_otlp_export( - &self, - layers: &mut Layers, - endpoint: &Url, - runner: &CliRunner, - ) -> Result<()> { - let endpoint = endpoint.clone(); - let protocol = self.cli.traces.protocol; - let filter_level = self.cli.traces.otlp_filter.clone(); - - match protocol { - OtlpProtocol::Grpc => { - runner.block_on(async { - layers.with_span_layer("reth".to_string(), endpoint, filter_level, protocol) - })?; - } - OtlpProtocol::Http => { - layers.with_span_layer("reth".to_string(), endpoint, filter_level, protocol)?; - } - } - - Ok(()) - } } /// Run CLI commands with the provided runner, components and launcher. diff --git a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index b1a472bd9fd..1a4f85b6198 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -58,9 +58,8 @@ url.workspace = true dirs-next.workspace = true shellexpand.workspace = true -# obs +# tracing tracing.workspace = true -reth-tracing-otlp.workspace = true # crypto secp256k1 = { workspace = true, features = ["global-context", "std", "recovery"] } diff --git a/crates/node/core/src/args/pruning.rs b/crates/node/core/src/args/pruning.rs index 2ff67446bbf..b5c782e62bf 100644 --- a/crates/node/core/src/args/pruning.rs +++ b/crates/node/core/src/args/pruning.rs @@ -18,33 +18,33 @@ pub struct PruningArgs { pub full: bool, /// Minimum pruning interval measured in blocks. - #[arg(long = "prune.block-interval", alias = "block-interval", value_parser = RangedU64ValueParser::::new().range(1..))] + #[arg(long, value_parser = RangedU64ValueParser::::new().range(1..),)] pub block_interval: Option, // Sender Recovery /// Prunes all sender recovery data. 
- #[arg(long = "prune.sender-recovery.full", alias = "prune.senderrecovery.full", conflicts_with_all = &["sender_recovery_distance", "sender_recovery_before"])] + #[arg(long = "prune.senderrecovery.full", conflicts_with_all = &["sender_recovery_distance", "sender_recovery_before"])] pub sender_recovery_full: bool, /// Prune sender recovery data before the `head-N` block number. In other words, keep last N + /// 1 blocks. - #[arg(long = "prune.sender-recovery.distance", alias = "prune.senderrecovery.distance", value_name = "BLOCKS", conflicts_with_all = &["sender_recovery_full", "sender_recovery_before"])] + #[arg(long = "prune.senderrecovery.distance", value_name = "BLOCKS", conflicts_with_all = &["sender_recovery_full", "sender_recovery_before"])] pub sender_recovery_distance: Option, /// Prune sender recovery data before the specified block number. The specified block number is /// not pruned. - #[arg(long = "prune.sender-recovery.before", alias = "prune.senderrecovery.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["sender_recovery_full", "sender_recovery_distance"])] + #[arg(long = "prune.senderrecovery.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["sender_recovery_full", "sender_recovery_distance"])] pub sender_recovery_before: Option, // Transaction Lookup /// Prunes all transaction lookup data. - #[arg(long = "prune.transaction-lookup.full", alias = "prune.transactionlookup.full", conflicts_with_all = &["transaction_lookup_distance", "transaction_lookup_before"])] + #[arg(long = "prune.transactionlookup.full", conflicts_with_all = &["transaction_lookup_distance", "transaction_lookup_before"])] pub transaction_lookup_full: bool, /// Prune transaction lookup data before the `head-N` block number. In other words, keep last N /// + 1 blocks. 
- #[arg(long = "prune.transaction-lookup.distance", alias = "prune.transactionlookup.distance", value_name = "BLOCKS", conflicts_with_all = &["transaction_lookup_full", "transaction_lookup_before"])] + #[arg(long = "prune.transactionlookup.distance", value_name = "BLOCKS", conflicts_with_all = &["transaction_lookup_full", "transaction_lookup_before"])] pub transaction_lookup_distance: Option, /// Prune transaction lookup data before the specified block number. The specified block number /// is not pruned. - #[arg(long = "prune.transaction-lookup.before", alias = "prune.transactionlookup.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["transaction_lookup_full", "transaction_lookup_distance"])] + #[arg(long = "prune.transactionlookup.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["transaction_lookup_full", "transaction_lookup_distance"])] pub transaction_lookup_before: Option, // Receipts @@ -61,38 +61,33 @@ pub struct PruningArgs { #[arg(long = "prune.receipts.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["receipts_full", "receipts_pre_merge", "receipts_distance"])] pub receipts_before: Option, /// Receipts Log Filter - #[arg( - long = "prune.receipts-log-filter", - alias = "prune.receiptslogfilter", - value_name = "FILTER_CONFIG", - hide = true - )] + #[arg(long = "prune.receiptslogfilter", value_name = "FILTER_CONFIG", hide = true)] #[deprecated] pub receipts_log_filter: Option, // Account History /// Prunes all account history. - #[arg(long = "prune.account-history.full", alias = "prune.accounthistory.full", conflicts_with_all = &["account_history_distance", "account_history_before"])] + #[arg(long = "prune.accounthistory.full", conflicts_with_all = &["account_history_distance", "account_history_before"])] pub account_history_full: bool, /// Prune account before the `head-N` block number. In other words, keep last N + 1 blocks. 
- #[arg(long = "prune.account-history.distance", alias = "prune.accounthistory.distance", value_name = "BLOCKS", conflicts_with_all = &["account_history_full", "account_history_before"])] + #[arg(long = "prune.accounthistory.distance", value_name = "BLOCKS", conflicts_with_all = &["account_history_full", "account_history_before"])] pub account_history_distance: Option, /// Prune account history before the specified block number. The specified block number is not /// pruned. - #[arg(long = "prune.account-history.before", alias = "prune.accounthistory.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["account_history_full", "account_history_distance"])] + #[arg(long = "prune.accounthistory.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["account_history_full", "account_history_distance"])] pub account_history_before: Option, // Storage History /// Prunes all storage history data. - #[arg(long = "prune.storage-history.full", alias = "prune.storagehistory.full", conflicts_with_all = &["storage_history_distance", "storage_history_before"])] + #[arg(long = "prune.storagehistory.full", conflicts_with_all = &["storage_history_distance", "storage_history_before"])] pub storage_history_full: bool, /// Prune storage history before the `head-N` block number. In other words, keep last N + 1 /// blocks. - #[arg(long = "prune.storage-history.distance", alias = "prune.storagehistory.distance", value_name = "BLOCKS", conflicts_with_all = &["storage_history_full", "storage_history_before"])] + #[arg(long = "prune.storagehistory.distance", value_name = "BLOCKS", conflicts_with_all = &["storage_history_full", "storage_history_before"])] pub storage_history_distance: Option, /// Prune storage history before the specified block number. The specified block number is not /// pruned. 
- #[arg(long = "prune.storage-history.before", alias = "prune.storagehistory.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["storage_history_full", "storage_history_distance"])] + #[arg(long = "prune.storagehistory.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["storage_history_full", "storage_history_distance"])] pub storage_history_before: Option, // Bodies diff --git a/crates/node/core/src/args/trace.rs b/crates/node/core/src/args/trace.rs index 5b5e21502d1..45bc9c9029c 100644 --- a/crates/node/core/src/args/trace.rs +++ b/crates/node/core/src/args/trace.rs @@ -1,19 +1,17 @@ //! Opentelemetry tracing configuration through CLI args. use clap::Parser; -use eyre::WrapErr; +use eyre::{ensure, WrapErr}; use reth_tracing::tracing_subscriber::EnvFilter; -use reth_tracing_otlp::OtlpProtocol; use url::Url; /// CLI arguments for configuring `Opentelemetry` trace and span export. #[derive(Debug, Clone, Parser)] pub struct TraceArgs { - /// Enable `Opentelemetry` tracing export to an OTLP endpoint. + /// Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently + /// only http exporting is supported. /// - /// If no value provided, defaults based on protocol: - /// - HTTP: `http://localhost:4318/v1/traces` - /// - gRPC: `http://localhost:4317` + /// If no value provided, defaults to `http://localhost:4318/v1/traces`. /// /// Example: --tracing-otlp=http://collector:4318/v1/traces #[arg( @@ -30,22 +28,6 @@ pub struct TraceArgs { )] pub otlp: Option, - /// OTLP transport protocol to use for exporting traces. - /// - /// - `http`: expects endpoint path to end with `/v1/traces` - /// - `grpc`: expects endpoint without a path - /// - /// Defaults to HTTP if not specified. - #[arg( - long = "tracing-otlp-protocol", - env = "OTEL_EXPORTER_OTLP_PROTOCOL", - global = true, - value_name = "PROTOCOL", - default_value = "http", - help_heading = "Tracing" - )] - pub protocol: OtlpProtocol, - /// Set a filter directive for the OTLP tracer. 
This controls the verbosity /// of spans and events sent to the OTLP endpoint. It follows the same /// syntax as the `RUST_LOG` environment variable. @@ -65,25 +47,25 @@ pub struct TraceArgs { impl Default for TraceArgs { fn default() -> Self { - Self { - otlp: None, - protocol: OtlpProtocol::Http, - otlp_filter: EnvFilter::from_default_env(), - } + Self { otlp: None, otlp_filter: EnvFilter::from_default_env() } } } -impl TraceArgs { - /// Validate the configuration - pub fn validate(&mut self) -> eyre::Result<()> { - if let Some(url) = &mut self.otlp { - self.protocol.validate_endpoint(url)?; - } - Ok(()) +// Parses and validates an OTLP endpoint url. +fn parse_otlp_endpoint(arg: &str) -> eyre::Result { + let mut url = Url::parse(arg).wrap_err("Invalid URL for OTLP trace output")?; + + // If the path is empty, we set the path. + if url.path() == "/" { + url.set_path("/v1/traces") } -} -// Parses an OTLP endpoint url. -fn parse_otlp_endpoint(arg: &str) -> eyre::Result { - Url::parse(arg).wrap_err("Invalid URL for OTLP trace output") + // OTLP url must end with `/v1/traces` per the OTLP specification. + ensure!( + url.path().ends_with("/v1/traces"), + "OTLP trace endpoint must end with /v1/traces, got path: {}", + url.path() + ); + + Ok(url) } diff --git a/crates/node/events/src/cl.rs b/crates/node/events/src/cl.rs index 99cdc1c245f..bdced7c97d6 100644 --- a/crates/node/events/src/cl.rs +++ b/crates/node/events/src/cl.rs @@ -61,7 +61,7 @@ impl Stream for ConsensusLayerHealthEvents { )) } - // We never received any forkchoice updates. + // We never had both FCU and transition config exchange. return Poll::Ready(Some(ConsensusLayerHealthEvent::NeverSeen)) } } @@ -71,8 +71,12 @@ impl Stream for ConsensusLayerHealthEvents { /// Execution Layer point of view. #[derive(Clone, Copy, Debug)] pub enum ConsensusLayerHealthEvent { - /// Consensus Layer client was never seen (no forkchoice updates received). + /// Consensus Layer client was never seen. 
NeverSeen, - /// Forkchoice updates from the Consensus Layer client have not been received for a while. + /// Consensus Layer client has not been seen for a while. + HasNotBeenSeenForAWhile(Duration), + /// Updates from the Consensus Layer client were never received. + NeverReceivedUpdates, + /// Updates from the Consensus Layer client have not been received for a while. HaveNotReceivedUpdatesForAWhile(Duration), } diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index 02c7709819e..3539eae0316 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -296,6 +296,17 @@ impl NodeState { "Post-merge network, but never seen beacon client. Please launch one to follow the chain!" ) } + ConsensusLayerHealthEvent::HasNotBeenSeenForAWhile(period) => { + warn!( + ?period, + "Post-merge network, but no beacon client seen for a while. Please launch one to follow the chain!" + ) + } + ConsensusLayerHealthEvent::NeverReceivedUpdates => { + warn!( + "Beacon client online, but never received consensus updates. Please ensure your beacon client is operational to follow the chain!" + ) + } ConsensusLayerHealthEvent::HaveNotReceivedUpdatesForAWhile(period) => { warn!( ?period, diff --git a/crates/optimism/chainspec/src/basefee.rs b/crates/optimism/chainspec/src/basefee.rs index 394de296f23..0ef712dc04f 100644 --- a/crates/optimism/chainspec/src/basefee.rs +++ b/crates/optimism/chainspec/src/basefee.rs @@ -1,13 +1,26 @@ //! Base fee related utilities for Optimism chains. 
-use core::cmp::max; - use alloy_consensus::BlockHeader; -use alloy_eips::calc_next_block_base_fee; use op_alloy_consensus::{decode_holocene_extra_data, decode_jovian_extra_data, EIP1559ParamError}; use reth_chainspec::{BaseFeeParams, EthChainSpec}; use reth_optimism_forks::OpHardforks; +fn next_base_fee_params( + chain_spec: impl EthChainSpec + OpHardforks, + parent: &H, + timestamp: u64, + denominator: u32, + elasticity: u32, +) -> u64 { + let base_fee_params = if elasticity == 0 && denominator == 0 { + chain_spec.base_fee_params_at_timestamp(timestamp) + } else { + BaseFeeParams::new(denominator as u128, elasticity as u128) + }; + + parent.next_block_base_fee(base_fee_params).unwrap_or_default() +} + /// Extracts the Holocene 1599 parameters from the encoded extra data from the parent header. /// /// Caution: Caller must ensure that holocene is active in the parent header. @@ -23,13 +36,7 @@ where { let (elasticity, denominator) = decode_holocene_extra_data(parent.extra_data())?; - let base_fee_params = if elasticity == 0 && denominator == 0 { - chain_spec.base_fee_params_at_timestamp(timestamp) - } else { - BaseFeeParams::new(denominator as u128, elasticity as u128) - }; - - Ok(parent.next_block_base_fee(base_fee_params).unwrap_or_default()) + Ok(next_base_fee_params(chain_spec, parent, timestamp, denominator, elasticity)) } /// Extracts the Jovian 1599 parameters from the encoded extra data from the parent header. @@ -50,22 +57,8 @@ where { let (elasticity, denominator, min_base_fee) = decode_jovian_extra_data(parent.extra_data())?; - let base_fee_params = if elasticity == 0 && denominator == 0 { - chain_spec.base_fee_params_at_timestamp(timestamp) - } else { - BaseFeeParams::new(denominator as u128, elasticity as u128) - }; - - // Starting from Jovian, we use the maximum of the gas used and the blob gas used to calculate - // the next base fee. 
- let gas_used = max(parent.gas_used(), parent.blob_gas_used().unwrap_or_default()); - - let next_base_fee = calc_next_block_base_fee( - gas_used, - parent.gas_limit(), - parent.base_fee_per_gas().unwrap_or_default(), - base_fee_params, - ); + let next_base_fee = + next_base_fee_params(chain_spec, parent, timestamp, denominator, elasticity); if next_base_fee < min_base_fee { return Ok(min_base_fee); @@ -73,127 +66,3 @@ where Ok(next_base_fee) } - -#[cfg(test)] -mod tests { - use std::sync::Arc; - - use op_alloy_consensus::encode_jovian_extra_data; - use reth_chainspec::{ChainSpec, ForkCondition, Hardfork}; - use reth_optimism_forks::OpHardfork; - - use crate::{OpChainSpec, BASE_SEPOLIA}; - - use super::*; - - const JOVIAN_TIMESTAMP: u64 = 1900000000; - - fn get_chainspec() -> Arc { - let mut base_sepolia_spec = BASE_SEPOLIA.inner.clone(); - base_sepolia_spec - .hardforks - .insert(OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(JOVIAN_TIMESTAMP)); - Arc::new(OpChainSpec { - inner: ChainSpec { - chain: base_sepolia_spec.chain, - genesis: base_sepolia_spec.genesis, - genesis_header: base_sepolia_spec.genesis_header, - ..Default::default() - }, - }) - } - - #[test] - fn test_next_base_fee_jovian_blob_gas_used_greater_than_gas_used() { - let chain_spec = get_chainspec(); - let mut parent = chain_spec.genesis_header().clone(); - let timestamp = JOVIAN_TIMESTAMP; - - const GAS_LIMIT: u64 = 10_000_000_000; - const BLOB_GAS_USED: u64 = 5_000_000_000; - const GAS_USED: u64 = 1_000_000_000; - const MIN_BASE_FEE: u64 = 100_000_000; - - parent.extra_data = - encode_jovian_extra_data([0; 8].into(), BaseFeeParams::base_sepolia(), MIN_BASE_FEE) - .unwrap(); - parent.blob_gas_used = Some(BLOB_GAS_USED); - parent.gas_used = GAS_USED; - parent.gas_limit = GAS_LIMIT; - - let expected_base_fee = calc_next_block_base_fee( - BLOB_GAS_USED, - parent.gas_limit(), - parent.base_fee_per_gas().unwrap_or_default(), - BaseFeeParams::base_sepolia(), - ); - assert_eq!( - expected_base_fee, 
- compute_jovian_base_fee(chain_spec, &parent, timestamp).unwrap() - ); - assert_ne!( - expected_base_fee, - calc_next_block_base_fee( - GAS_USED, - parent.gas_limit(), - parent.base_fee_per_gas().unwrap_or_default(), - BaseFeeParams::base_sepolia(), - ) - ) - } - - #[test] - fn test_next_base_fee_jovian_blob_gas_used_less_than_gas_used() { - let chain_spec = get_chainspec(); - let mut parent = chain_spec.genesis_header().clone(); - let timestamp = JOVIAN_TIMESTAMP; - - const GAS_LIMIT: u64 = 10_000_000_000; - const BLOB_GAS_USED: u64 = 100_000_000; - const GAS_USED: u64 = 1_000_000_000; - const MIN_BASE_FEE: u64 = 100_000_000; - - parent.extra_data = - encode_jovian_extra_data([0; 8].into(), BaseFeeParams::base_sepolia(), MIN_BASE_FEE) - .unwrap(); - parent.blob_gas_used = Some(BLOB_GAS_USED); - parent.gas_used = GAS_USED; - parent.gas_limit = GAS_LIMIT; - - let expected_base_fee = calc_next_block_base_fee( - GAS_USED, - parent.gas_limit(), - parent.base_fee_per_gas().unwrap_or_default(), - BaseFeeParams::base_sepolia(), - ); - assert_eq!( - expected_base_fee, - compute_jovian_base_fee(chain_spec, &parent, timestamp).unwrap() - ); - } - - #[test] - fn test_next_base_fee_jovian_min_base_fee() { - let chain_spec = get_chainspec(); - let mut parent = chain_spec.genesis_header().clone(); - let timestamp = JOVIAN_TIMESTAMP; - - const GAS_LIMIT: u64 = 10_000_000_000; - const BLOB_GAS_USED: u64 = 100_000_000; - const GAS_USED: u64 = 1_000_000_000; - const MIN_BASE_FEE: u64 = 5_000_000_000; - - parent.extra_data = - encode_jovian_extra_data([0; 8].into(), BaseFeeParams::base_sepolia(), MIN_BASE_FEE) - .unwrap(); - parent.blob_gas_used = Some(BLOB_GAS_USED); - parent.gas_used = GAS_USED; - parent.gas_limit = GAS_LIMIT; - - let expected_base_fee = MIN_BASE_FEE; - assert_eq!( - expected_base_fee, - compute_jovian_base_fee(chain_spec, &parent, timestamp).unwrap() - ); - } -} diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index 
eb320045337..6ed24ca5823 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -44,7 +44,6 @@ reth-optimism-evm.workspace = true reth-cli-runner.workspace = true reth-node-builder = { workspace = true, features = ["op"] } reth-tracing.workspace = true -reth-tracing-otlp.workspace = true # eth alloy-eips.workspace = true @@ -56,7 +55,6 @@ alloy-rlp.workspace = true futures-util.workspace = true derive_more.workspace = true serde.workspace = true -url.workspace = true clap = { workspace = true, features = ["derive", "env"] } tokio = { workspace = true, features = ["sync", "macros", "time", "rt-multi-thread"] } @@ -107,5 +105,4 @@ serde = [ "reth-optimism-primitives/serde", "reth-primitives-traits/serde", "reth-optimism-chainspec/serde", - "url/serde", ] diff --git a/crates/optimism/cli/src/app.rs b/crates/optimism/cli/src/app.rs index 8567c2b7e5a..621d16c7e13 100644 --- a/crates/optimism/cli/src/app.rs +++ b/crates/optimism/cli/src/app.rs @@ -9,10 +9,8 @@ use reth_optimism_consensus::OpBeaconConsensus; use reth_optimism_node::{OpExecutorProvider, OpNode}; use reth_rpc_server_types::RpcModuleValidator; use reth_tracing::{FileWorkerGuard, Layers}; -use reth_tracing_otlp::OtlpProtocol; use std::{fmt, sync::Arc}; use tracing::info; -use url::Url; /// A wrapper around a parsed CLI that handles command execution. #[derive(Debug)] @@ -65,8 +63,7 @@ where self.cli.logs.log_file_directory.join(chain_spec.chain.to_string()); } - self.init_tracing(&runner)?; - + self.init_tracing()?; // Install the prometheus recorder to be sure to record all metrics let _ = install_prometheus_recorder(); @@ -117,18 +114,18 @@ where /// Initializes tracing with the configured options. /// /// If file logging is enabled, this function stores guard to the struct. - /// For gRPC OTLP, it requires tokio runtime context. 
- pub fn init_tracing(&mut self, runner: &CliRunner) -> Result<()> { + pub fn init_tracing(&mut self) -> Result<()> { if self.guard.is_none() { let mut layers = self.layers.take().unwrap_or_default(); #[cfg(feature = "otlp")] - { - self.cli.traces.validate()?; - if let Some(endpoint) = &self.cli.traces.otlp { - info!(target: "reth::cli", "Starting OTLP tracing export to {:?}", endpoint); - self.init_otlp_export(&mut layers, endpoint, runner)?; - } + if let Some(output_type) = &self.cli.traces.otlp { + info!(target: "reth::cli", "Starting OTLP tracing export to {:?}", output_type); + layers.with_span_layer( + "reth".to_string(), + output_type.clone(), + self.cli.traces.otlp_filter.clone(), + )?; } self.guard = self.cli.logs.init_tracing_with_layers(layers)?; @@ -136,33 +133,4 @@ where } Ok(()) } - - /// Initialize OTLP tracing export based on protocol type. - /// - /// For gRPC, `block_on` is required because tonic's channel initialization needs - /// a tokio runtime context, even though `with_span_layer` itself is not async. 
- #[cfg(feature = "otlp")] - fn init_otlp_export( - &self, - layers: &mut Layers, - endpoint: &Url, - runner: &CliRunner, - ) -> Result<()> { - let endpoint = endpoint.clone(); - let protocol = self.cli.traces.protocol; - let level_filter = self.cli.traces.otlp_filter.clone(); - - match protocol { - OtlpProtocol::Grpc => { - runner.block_on(async { - layers.with_span_layer("reth".to_string(), endpoint, level_filter, protocol) - })?; - } - OtlpProtocol::Http => { - layers.with_span_layer("reth".to_string(), endpoint, level_filter, protocol)?; - } - } - - Ok(()) - } } diff --git a/crates/optimism/evm/src/build.rs b/crates/optimism/evm/src/build.rs index 42552b2a5f2..b04d62602ad 100644 --- a/crates/optimism/evm/src/build.rs +++ b/crates/optimism/evm/src/build.rs @@ -46,7 +46,7 @@ impl OpBlockAssembler { evm_env, execution_ctx: ctx, transactions, - output: BlockExecutionResult { receipts, gas_used, blob_gas_used, requests: _ }, + output: BlockExecutionResult { receipts, gas_used, .. }, bundle_state, state_root, state_provider, @@ -80,11 +80,7 @@ impl OpBlockAssembler { }; let (excess_blob_gas, blob_gas_used) = - if self.chain_spec.is_jovian_active_at_timestamp(timestamp) { - // In jovian, we're using the blob gas used field to store the current da - // footprint's value. 
- (Some(0), Some(*blob_gas_used)) - } else if self.chain_spec.is_ecotone_active_at_timestamp(timestamp) { + if self.chain_spec.is_ecotone_active_at_timestamp(timestamp) { (Some(0), Some(0)) } else { (None, None) diff --git a/crates/optimism/evm/src/error.rs b/crates/optimism/evm/src/error.rs index 1a8e76c1490..9b694243fac 100644 --- a/crates/optimism/evm/src/error.rs +++ b/crates/optimism/evm/src/error.rs @@ -38,9 +38,6 @@ pub enum L1BlockInfoError { /// Operator fee constant conversion error #[error("could not convert operator fee constant")] OperatorFeeConstantConversion, - /// DA foootprint gas scalar constant conversion error - #[error("could not convert DA footprint gas scalar constant")] - DaFootprintGasScalarConversion, /// Optimism hardforks not active #[error("Optimism hardforks are not active")] HardforksNotActive, diff --git a/crates/optimism/evm/src/l1.rs b/crates/optimism/evm/src/l1.rs index 2afe6e9d3a2..4165221c987 100644 --- a/crates/optimism/evm/src/l1.rs +++ b/crates/optimism/evm/src/l1.rs @@ -2,7 +2,7 @@ use crate::{error::L1BlockInfoError, revm_spec_by_timestamp_after_bedrock, OpBlockExecutionError}; use alloy_consensus::Transaction; -use alloy_primitives::{hex, U16, U256}; +use alloy_primitives::{hex, U256}; use op_revm::L1BlockInfo; use reth_execution_errors::BlockExecutionError; use reth_optimism_forks::OpHardforks; @@ -14,10 +14,6 @@ const L1_BLOCK_ECOTONE_SELECTOR: [u8; 4] = hex!("440a5e20"); /// The function selector of the "setL1BlockValuesIsthmus" function in the `L1Block` contract. const L1_BLOCK_ISTHMUS_SELECTOR: [u8; 4] = hex!("098999be"); -/// The function selector of the "setL1BlockValuesJovian" function in the `L1Block` contract. -/// This is the first 4 bytes of `keccak256("setL1BlockValuesJovian()")`. -const L1_BLOCK_JOVIAN_SELECTOR: [u8; 4] = hex!("3db6be2b"); - /// Extracts the [`L1BlockInfo`] from the L2 block. The L1 info transaction is always the first /// transaction in the L2 block. 
/// @@ -56,14 +52,11 @@ pub fn extract_l1_info_from_tx( /// If the input is shorter than 4 bytes. pub fn parse_l1_info(input: &[u8]) -> Result { // Parse the L1 info transaction into an L1BlockInfo struct, depending on the function selector. - // There are currently 4 variants: - // - Jovian + // There are currently 3 variants: // - Isthmus // - Ecotone // - Bedrock - if input[0..4] == L1_BLOCK_JOVIAN_SELECTOR { - parse_l1_info_tx_jovian(input[4..].as_ref()) - } else if input[0..4] == L1_BLOCK_ISTHMUS_SELECTOR { + if input[0..4] == L1_BLOCK_ISTHMUS_SELECTOR { parse_l1_info_tx_isthmus(input[4..].as_ref()) } else if input[0..4] == L1_BLOCK_ECOTONE_SELECTOR { parse_l1_info_tx_ecotone(input[4..].as_ref()) @@ -95,12 +88,14 @@ pub fn parse_l1_info_tx_bedrock(data: &[u8]) -> Result Result Result Result Result { - if data.len() != 174 { - return Err(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::UnexpectedCalldataLength)); - } - - // https://github.com/ethereum-optimism/op-geth/blob/60038121c7571a59875ff9ed7679c48c9f73405d/core/types/rollup_cost.go#L317-L328 - // - // data layout assumed for Ecotone: - // offset type varname - // 0 - // 4 uint32 _basefeeScalar (start offset in this scope) - // 8 uint32 _blobBaseFeeScalar - // 12 uint64 _sequenceNumber, - // 20 uint64 _timestamp, - // 28 uint64 _l1BlockNumber - // 36 uint256 _basefee, - // 68 uint256 _blobBaseFee, - // 100 bytes32 _hash, - // 132 bytes32 _batcherHash, - // 164 uint32 _operatorFeeScalar - // 168 uint64 _operatorFeeConstant - // 176 uint16 _daFootprintGasScalar - - let l1_base_fee_scalar = U256::try_from_be_slice(&data[..4]) - .ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BaseFeeScalarConversion))?; - let l1_blob_base_fee_scalar = U256::try_from_be_slice(&data[4..8]).ok_or({ - OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BlobBaseFeeScalarConversion) - })?; - let l1_base_fee = U256::try_from_be_slice(&data[32..64]) - 
.ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BaseFeeConversion))?; - let l1_blob_base_fee = U256::try_from_be_slice(&data[64..96]) - .ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BlobBaseFeeConversion))?; - let operator_fee_scalar = U256::try_from_be_slice(&data[160..164]).ok_or({ - OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::OperatorFeeScalarConversion) - })?; - let operator_fee_constant = U256::try_from_be_slice(&data[164..172]).ok_or({ - OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::OperatorFeeConstantConversion) - })?; - let da_footprint_gas_scalar: u16 = U16::try_from_be_slice(&data[172..174]) - .ok_or({ - OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::DaFootprintGasScalarConversion) - })? - .to(); + }; - Ok(L1BlockInfo { - l1_base_fee, - l1_base_fee_scalar, - l1_blob_base_fee: Some(l1_blob_base_fee), - l1_blob_base_fee_scalar: Some(l1_blob_base_fee_scalar), - operator_fee_scalar: Some(operator_fee_scalar), - operator_fee_constant: Some(operator_fee_constant), - da_footprint_gas_scalar: Some(da_footprint_gas_scalar), - ..Default::default() - }) + Ok(l1block) } /// An extension trait for [`L1BlockInfo`] that allows us to calculate the L1 cost of a transaction @@ -354,7 +282,6 @@ mod tests { use super::*; use alloy_consensus::{Block, BlockBody}; use alloy_eips::eip2718::Decodable2718; - use alloy_primitives::keccak256; use reth_optimism_chainspec::OP_MAINNET; use reth_optimism_forks::OpHardforks; use reth_optimism_primitives::OpTransactionSigned; @@ -381,12 +308,6 @@ mod tests { assert_eq!(l1_info.l1_blob_base_fee_scalar, None); } - #[test] - fn test_verify_set_jovian() { - let hash = &keccak256("setL1BlockValuesJovian()")[..4]; - assert_eq!(hash, L1_BLOCK_JOVIAN_SELECTOR) - } - #[test] fn sanity_l1_block_ecotone() { // rig @@ -487,33 +408,4 @@ mod tests { assert_eq!(l1_block_info.operator_fee_scalar, operator_fee_scalar); assert_eq!(l1_block_info.operator_fee_constant, operator_fee_constant); } - - #[test] - fn 
parse_l1_info_jovian() { - // L1 block info from a devnet with Isthmus activated - const DATA: &[u8] = &hex!( - "3db6be2b00000558000c5fc500000000000000030000000067a9f765000000000000002900000000000000000000000000000000000000000000000000000000006a6d09000000000000000000000000000000000000000000000000000000000000000172fcc8e8886636bdbe96ba0e4baab67ea7e7811633f52b52e8cf7a5123213b6f000000000000000000000000d3f2c5afb2d76f5579f326b0cd7da5f5a4126c3500004e2000000000000001f4dead" - ); - - // expected l1 block info verified against expected l1 fee and operator fee for tx. - let l1_base_fee = U256::from(6974729); - let l1_base_fee_scalar = U256::from(1368); - let l1_blob_base_fee = Some(U256::from(1)); - let l1_blob_base_fee_scalar = Some(U256::from(810949)); - let operator_fee_scalar = Some(U256::from(20000)); - let operator_fee_constant = Some(U256::from(500)); - let da_footprint_gas_scalar: Option = Some(U16::from(0xdead).to()); - - // test - - let l1_block_info = parse_l1_info(DATA).unwrap(); - - assert_eq!(l1_block_info.l1_base_fee, l1_base_fee); - assert_eq!(l1_block_info.l1_base_fee_scalar, l1_base_fee_scalar); - assert_eq!(l1_block_info.l1_blob_base_fee, l1_blob_base_fee); - assert_eq!(l1_block_info.l1_blob_base_fee_scalar, l1_blob_base_fee_scalar); - assert_eq!(l1_block_info.operator_fee_scalar, operator_fee_scalar); - assert_eq!(l1_block_info.operator_fee_constant, operator_fee_constant); - assert_eq!(l1_block_info.da_footprint_gas_scalar, da_footprint_gas_scalar); - } } diff --git a/crates/optimism/payload/Cargo.toml b/crates/optimism/payload/Cargo.toml index e75075a12cf..8d1875fe753 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -44,7 +44,6 @@ op-alloy-consensus.workspace = true alloy-rpc-types-engine.workspace = true alloy-rpc-types-debug.workspace = true alloy-consensus.workspace = true -alloy-evm.workspace = true # misc derive_more.workspace = true diff --git a/crates/optimism/payload/src/builder.rs 
b/crates/optimism/payload/src/builder.rs index ab367db7f2d..ad8152123b7 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -1,4 +1,5 @@ //! Optimism payload builder implementation. + use crate::{ config::{OpBuilderConfig, OpDAConfig}, error::OpPayloadBuilderError, @@ -6,7 +7,6 @@ use crate::{ OpAttributes, OpPayloadBuilderAttributes, OpPayloadPrimitives, }; use alloy_consensus::{BlockHeader, Transaction, Typed2718}; -use alloy_evm::Evm as AlloyEvm; use alloy_primitives::{B256, U256}; use alloy_rpc_types_debug::ExecutionWitness; use alloy_rpc_types_engine::PayloadId; @@ -14,12 +14,10 @@ use reth_basic_payload_builder::*; use reth_chain_state::ExecutedBlock; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_evm::{ - block::BlockExecutorFor, execute::{ BlockBuilder, BlockBuilderOutcome, BlockExecutionError, BlockExecutor, BlockValidationError, }, - op_revm::{constants::L1_BLOCK_CONTRACT, L1BlockInfo}, - ConfigureEvm, Database, + ConfigureEvm, Database, Evm, }; use reth_execution_types::ExecutionOutcome; use reth_optimism_forks::OpHardforks; @@ -346,11 +344,6 @@ impl OpBuilder<'_, Txs> { let mut db = BalDatabase::new(State::builder().with_database(db).with_bundle_update().build()); - // Load the L1 block contract into the database cache. If the L1 block contract is not - // pre-loaded the database will panic when trying to fetch the DA footprint gas - // scalar. - db.load_cache_account(L1_BLOCK_CONTRACT).map_err(BlockExecutionError::other)?; - let mut builder = ctx.block_builder(&mut db)?; // 1. 
apply pre-execution changes @@ -522,27 +515,17 @@ impl ExecutionInfo { tx_data_limit: Option, block_data_limit: Option, tx_gas_limit: u64, - da_footprint_gas_scalar: Option, ) -> bool { if tx_data_limit.is_some_and(|da_limit| tx_da_size > da_limit) { return true; } - let total_da_bytes_used = self.cumulative_da_bytes_used.saturating_add(tx_da_size); - - if block_data_limit.is_some_and(|da_limit| total_da_bytes_used > da_limit) { + if block_data_limit + .is_some_and(|da_limit| self.cumulative_da_bytes_used + tx_da_size > da_limit) + { return true; } - // Post Jovian: the tx DA footprint must be less than the block gas limit - if let Some(da_footprint_gas_scalar) = da_footprint_gas_scalar { - let tx_da_footprint = - total_da_bytes_used.saturating_mul(da_footprint_gas_scalar as u64); - if tx_da_footprint > block_gas_limit { - return true; - } - } - self.cumulative_gas_used + tx_gas_limit > block_gas_limit } } @@ -672,18 +655,14 @@ where /// Executes the given best transactions and updates the execution info. /// /// Returns `Ok(Some(())` if the job was cancelled. 
- pub fn execute_best_transactions( + pub fn execute_best_transactions( &self, info: &mut ExecutionInfo, - builder: &mut Builder, + builder: &mut impl BlockBuilder, mut best_txs: impl PayloadTransactions< Transaction: PoolTransaction> + OpPooledTx, >, - ) -> Result, PayloadBuilderError> - where - Builder: BlockBuilder, - <::Evm as AlloyEvm>::DB: Database, - { + ) -> Result, PayloadBuilderError> { let block_gas_limit = builder.evm_mut().block().gas_limit(); let block_da_limit = self.da_config.max_da_block_size(); let tx_da_limit = self.da_config.max_da_tx_size(); @@ -693,23 +672,12 @@ where let interop = tx.interop_deadline(); let tx_da_size = tx.estimated_da_size(); let tx = tx.into_consensus(); - - let da_footprint_gas_scalar = self - .chain_spec - .is_jovian_active_at_timestamp(self.attributes().timestamp()) - .then_some( - L1BlockInfo::fetch_da_footprint_gas_scalar(builder.evm_mut().db_mut()).expect( - "DA footprint should always be available from the database post jovian", - ), - ); - if info.is_tx_over_limits( tx_da_size, block_gas_limit, tx_da_limit, block_da_limit, tx.gas_limit(), - da_footprint_gas_scalar, ) { // we can't fit this transaction into the block, so we need to mark it as // invalid which also removes all dependent transaction from diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index 5d1e8e29794..f8910c22a33 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -131,14 +131,10 @@ pub struct OpReceiptFieldsBuilder { pub l1_blob_base_fee: Option, /// The current L1 blob base fee scalar. pub l1_blob_base_fee_scalar: Option, - /* ---------------------------------------- Isthmus ---------------------------------------- */ /// The current operator fee scalar. pub operator_fee_scalar: Option, /// The current L1 blob base fee scalar. 
pub operator_fee_constant: Option, - /* ---------------------------------------- Jovian ----------------------------------------- */ - /// The current DA footprint gas scalar. - pub da_footprint_gas_scalar: Option, } impl OpReceiptFieldsBuilder { @@ -158,7 +154,6 @@ impl OpReceiptFieldsBuilder { l1_blob_base_fee_scalar: None, operator_fee_scalar: None, operator_fee_constant: None, - da_footprint_gas_scalar: None, } } @@ -210,8 +205,6 @@ impl OpReceiptFieldsBuilder { l1_block_info.operator_fee_constant.map(|constant| constant.saturating_to()); } - self.da_footprint_gas_scalar = l1_block_info.da_footprint_gas_scalar; - Ok(self) } @@ -243,7 +236,6 @@ impl OpReceiptFieldsBuilder { l1_blob_base_fee_scalar, operator_fee_scalar, operator_fee_constant, - da_footprint_gas_scalar, } = self; OpTransactionReceiptFields { @@ -257,7 +249,7 @@ impl OpReceiptFieldsBuilder { l1_blob_base_fee_scalar, operator_fee_scalar, operator_fee_constant, - da_footprint_gas_scalar, + da_footprint_gas_scalar: None, }, deposit_nonce, deposit_receipt_version, @@ -417,7 +409,7 @@ mod test { l1_blob_base_fee_scalar, operator_fee_scalar, operator_fee_constant, - da_footprint_gas_scalar, + .. } = receipt_meta.l1_block_info; assert_eq!( @@ -461,11 +453,6 @@ mod test { TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.operator_fee_constant, "incorrect operator fee constant" ); - assert_eq!( - da_footprint_gas_scalar, - TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.da_footprint_gas_scalar, - "incorrect da footprint gas scalar" - ); } #[test] @@ -553,7 +540,7 @@ mod test { l1_blob_base_fee_scalar, operator_fee_scalar, operator_fee_constant, - da_footprint_gas_scalar, + .. 
} = receipt_meta.l1_block_info; assert_eq!(l1_gas_price, Some(14121491676), "incorrect l1 base fee (former gas price)"); @@ -565,6 +552,5 @@ mod test { assert_eq!(l1_blob_base_fee_scalar, Some(1055762), "incorrect l1 blob base fee scalar"); assert_eq!(operator_fee_scalar, None, "incorrect operator fee scalar"); assert_eq!(operator_fee_constant, None, "incorrect operator fee constant"); - assert_eq!(da_footprint_gas_scalar, None, "incorrect da footprint gas scalar"); } } diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 37c05815a61..aa7e8ea60bd 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -95,8 +95,8 @@ where let this = self.clone(); let timeout_duration = self.send_raw_transaction_sync_timeout(); async move { - let mut canonical_stream = this.provider().canonical_state_stream(); let hash = EthTransactions::send_raw_transaction(&this, tx).await?; + let mut canonical_stream = this.provider().canonical_state_stream(); let flashblock_rx = this.pending_block_rx(); let mut flashblock_stream = flashblock_rx.map(WatchStream::new); diff --git a/crates/optimism/txpool/src/validator.rs b/crates/optimism/txpool/src/validator.rs index 0cec4482a32..631c4255942 100644 --- a/crates/optimism/txpool/src/validator.rs +++ b/crates/optimism/txpool/src/validator.rs @@ -143,8 +143,8 @@ where self.block_info.timestamp.store(header.timestamp(), Ordering::Relaxed); self.block_info.number.store(header.number(), Ordering::Relaxed); - if let Some(Ok(l1_block_info)) = tx.map(reth_optimism_evm::extract_l1_info_from_tx) { - *self.block_info.l1_block_info.write() = l1_block_info; + if let Some(Ok(cost_addition)) = tx.map(reth_optimism_evm::extract_l1_info_from_tx) { + *self.block_info.l1_block_info.write() = cost_addition; } if self.chain_spec().is_interop_active_at_timestamp(header.timestamp()) { diff --git a/crates/prune/db/Cargo.toml b/crates/prune/db/Cargo.toml deleted file 
mode 100644 index 269a87bf7b6..00000000000 --- a/crates/prune/db/Cargo.toml +++ /dev/null @@ -1,15 +0,0 @@ -[package] -name = "reth-prune-db" -version.workspace = true -edition.workspace = true -rust-version.workspace = true -license.workspace = true -homepage.workspace = true -repository.workspace = true -exclude.workspace = true -description = "Database integration with prune implementation" - -[dependencies] - -[lints] -workspace = true diff --git a/crates/prune/db/src/lib.rs b/crates/prune/db/src/lib.rs deleted file mode 100644 index ef777085e54..00000000000 --- a/crates/prune/db/src/lib.rs +++ /dev/null @@ -1 +0,0 @@ -//! An integration of `reth-prune` with `reth-db`. diff --git a/crates/rpc/rpc-convert/src/transaction.rs b/crates/rpc/rpc-convert/src/transaction.rs index 6766ec43fb0..046acbda544 100644 --- a/crates/rpc/rpc-convert/src/transaction.rs +++ b/crates/rpc/rpc-convert/src/transaction.rs @@ -1,4 +1,5 @@ //! Compatibility functions for rpc `Transaction` type. + use crate::{ fees::{CallFees, CallFeesError}, RpcHeader, RpcReceipt, RpcTransaction, RpcTxReq, RpcTypes, SignableTxRequest, diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 2cbf1aff14e..81909b3f36e 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -91,8 +91,8 @@ pub trait EthTransactions: LoadTransaction { let this = self.clone(); let timeout_duration = self.send_raw_transaction_sync_timeout(); async move { - let mut stream = this.provider().canonical_state_stream(); let hash = EthTransactions::send_raw_transaction(&this, tx).await?; + let mut stream = this.provider().canonical_state_stream(); tokio::time::timeout(timeout_duration, async { while let Some(notification) = stream.next().await { let chain = notification.committed(); diff --git a/crates/rpc/rpc/src/admin.rs b/crates/rpc/rpc/src/admin.rs index af5e1ae2ef9..ce548230864 100644 --- 
a/crates/rpc/rpc/src/admin.rs +++ b/crates/rpc/rpc/src/admin.rs @@ -14,7 +14,6 @@ use reth_network_types::PeerKind; use reth_rpc_api::AdminApiServer; use reth_rpc_server_types::ToRpcResult; use reth_transaction_pool::TransactionPool; -use revm_primitives::keccak256; /// `admin` API implementation. /// @@ -75,25 +74,34 @@ where let mut infos = Vec::with_capacity(peers.len()); for peer in peers { - infos.push(PeerInfo { - id: keccak256(peer.remote_id.as_slice()).to_string(), - name: peer.client_version.to_string(), - enode: peer.enode, - enr: peer.enr, - caps: peer.capabilities.capabilities().iter().map(|cap| cap.to_string()).collect(), - network: PeerNetworkInfo { - remote_address: peer.remote_addr, - local_address: peer.local_addr.unwrap_or_else(|| self.network.local_addr()), - inbound: peer.direction.is_incoming(), - trusted: peer.kind.is_trusted(), - static_node: peer.kind.is_static(), - }, - protocols: PeerProtocolInfo { - eth: Some(EthPeerInfo::Info(EthInfo { version: peer.status.version as u64 })), - snap: None, - other: Default::default(), - }, - }) + if let Ok(pk) = id2pk(peer.remote_id) { + infos.push(PeerInfo { + id: pk.to_string(), + name: peer.client_version.to_string(), + enode: peer.enode, + enr: peer.enr, + caps: peer + .capabilities + .capabilities() + .iter() + .map(|cap| cap.to_string()) + .collect(), + network: PeerNetworkInfo { + remote_address: peer.remote_addr, + local_address: peer.local_addr.unwrap_or_else(|| self.network.local_addr()), + inbound: peer.direction.is_incoming(), + trusted: peer.kind.is_trusted(), + static_node: peer.kind.is_static(), + }, + protocols: PeerProtocolInfo { + eth: Some(EthPeerInfo::Info(EthInfo { + version: peer.status.version as u64, + })), + snap: None, + other: Default::default(), + }, + }) + } } Ok(infos) diff --git a/crates/static-file/static-file/src/segments/headers.rs b/crates/static-file/static-file/src/segments/headers.rs new file mode 100644 index 00000000000..990e33ee52a --- /dev/null +++ 
b/crates/static-file/static-file/src/segments/headers.rs @@ -0,0 +1,54 @@ +use crate::segments::Segment; +use alloy_primitives::BlockNumber; +use reth_codecs::Compact; +use reth_db_api::{cursor::DbCursorRO, table::Value, tables, transaction::DbTx}; +use reth_primitives_traits::NodePrimitives; +use reth_provider::{providers::StaticFileWriter, DBProvider, StaticFileProviderFactory}; +use reth_static_file_types::StaticFileSegment; +use reth_storage_errors::provider::ProviderResult; +use std::ops::RangeInclusive; + +/// Static File segment responsible for [`StaticFileSegment::Headers`] part of data. +#[derive(Debug, Default)] +pub struct Headers; + +impl Segment for Headers +where + Provider: StaticFileProviderFactory> + + DBProvider, +{ + fn segment(&self) -> StaticFileSegment { + StaticFileSegment::Headers + } + + fn copy_to_static_files( + &self, + provider: Provider, + block_range: RangeInclusive, + ) -> ProviderResult<()> { + let static_file_provider = provider.static_file_provider(); + let mut static_file_writer = + static_file_provider.get_writer(*block_range.start(), StaticFileSegment::Headers)?; + + let mut headers_cursor = provider + .tx_ref() + .cursor_read::::BlockHeader>>( + )?; + let headers_walker = headers_cursor.walk_range(block_range.clone())?; + + let mut canonical_headers_cursor = + provider.tx_ref().cursor_read::()?; + let canonical_headers_walker = canonical_headers_cursor.walk_range(block_range)?; + + for (header_entry, canonical_header_entry) in headers_walker.zip(canonical_headers_walker) { + let (header_block, header) = header_entry?; + let (canonical_header_block, canonical_header) = canonical_header_entry?; + + debug_assert_eq!(header_block, canonical_header_block); + + static_file_writer.append_header(&header, &canonical_header)?; + } + + Ok(()) + } +} diff --git a/crates/static-file/static-file/src/segments/mod.rs b/crates/static-file/static-file/src/segments/mod.rs index a1499a2eaa8..fc79effdd5a 100644 --- 
a/crates/static-file/static-file/src/segments/mod.rs +++ b/crates/static-file/static-file/src/segments/mod.rs @@ -1,5 +1,11 @@ //! `StaticFile` segment implementations and utilities. +mod transactions; +pub use transactions::Transactions; + +mod headers; +pub use headers::Headers; + mod receipts; pub use receipts::Receipts; diff --git a/crates/static-file/static-file/src/segments/transactions.rs b/crates/static-file/static-file/src/segments/transactions.rs new file mode 100644 index 00000000000..74cb58ed708 --- /dev/null +++ b/crates/static-file/static-file/src/segments/transactions.rs @@ -0,0 +1,60 @@ +use crate::segments::Segment; +use alloy_primitives::BlockNumber; +use reth_codecs::Compact; +use reth_db_api::{cursor::DbCursorRO, table::Value, tables, transaction::DbTx}; +use reth_primitives_traits::NodePrimitives; +use reth_provider::{ + providers::StaticFileWriter, BlockReader, DBProvider, StaticFileProviderFactory, +}; +use reth_static_file_types::StaticFileSegment; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; +use std::ops::RangeInclusive; + +/// Static File segment responsible for [`StaticFileSegment::Transactions`] part of data. +#[derive(Debug, Default)] +pub struct Transactions; + +impl Segment for Transactions +where + Provider: StaticFileProviderFactory> + + DBProvider + + BlockReader, +{ + fn segment(&self) -> StaticFileSegment { + StaticFileSegment::Transactions + } + + /// Write transactions from database table [`tables::Transactions`] to static files with segment + /// [`StaticFileSegment::Transactions`] for the provided block range. 
+ fn copy_to_static_files( + &self, + provider: Provider, + block_range: RangeInclusive, + ) -> ProviderResult<()> { + let static_file_provider = provider.static_file_provider(); + let mut static_file_writer = static_file_provider + .get_writer(*block_range.start(), StaticFileSegment::Transactions)?; + + for block in block_range { + static_file_writer.increment_block(block)?; + + let block_body_indices = provider + .block_body_indices(block)? + .ok_or(ProviderError::BlockBodyIndicesNotFound(block))?; + + let mut transactions_cursor = provider.tx_ref().cursor_read::::SignedTx, + >>()?; + let transactions_walker = + transactions_cursor.walk_range(block_body_indices.tx_num_range())?; + + for entry in transactions_walker { + let (tx_number, transaction) = entry?; + + static_file_writer.append_transaction(tx_number, &transaction)?; + } + } + + Ok(()) + } +} diff --git a/crates/static-file/static-file/src/static_file_producer.rs b/crates/static-file/static-file/src/static_file_producer.rs index 2e7aa4b9df4..185fbf7c498 100644 --- a/crates/static-file/static-file/src/static_file_producer.rs +++ b/crates/static-file/static-file/src/static_file_producer.rs @@ -131,6 +131,12 @@ where let mut segments = Vec::<(Box>, RangeInclusive)>::new(); + if let Some(block_range) = targets.transactions.clone() { + segments.push((Box::new(segments::Transactions), block_range)); + } + if let Some(block_range) = targets.headers.clone() { + segments.push((Box::new(segments::Headers), block_range)); + } if let Some(block_range) = targets.receipts.clone() { segments.push((Box::new(segments::Receipts), block_range)); } @@ -172,11 +178,16 @@ where /// Returns highest block numbers for all static file segments. 
pub fn copy_to_static_files(&self) -> ProviderResult { let provider = self.provider.database_provider_ro()?; - let stages_checkpoints = std::iter::once(StageId::Execution) + let stages_checkpoints = [StageId::Headers, StageId::Execution, StageId::Bodies] + .into_iter() .map(|stage| provider.get_stage_checkpoint(stage).map(|c| c.map(|c| c.block_number))) .collect::, _>>()?; - let highest_static_files = HighestStaticFiles { receipts: stages_checkpoints[0] }; + let highest_static_files = HighestStaticFiles { + headers: stages_checkpoints[0], + receipts: stages_checkpoints[1], + transactions: stages_checkpoints[2], + }; let targets = self.get_static_file_targets(highest_static_files)?; self.run(targets)?; @@ -193,17 +204,26 @@ where let highest_static_files = self.provider.static_file_provider().get_highest_static_files(); let targets = StaticFileTargets { - // StaticFile receipts only if they're not pruned according to the user configuration - receipts: if self.prune_modes.receipts.is_none() { - finalized_block_numbers.receipts.and_then(|finalized_block_number| { + headers: finalized_block_numbers.headers.and_then(|finalized_block_number| { + self.get_static_file_target(highest_static_files.headers, finalized_block_number) + }), + receipts: finalized_block_numbers + .receipts + // StaticFile receipts only if they're not pruned according to the user + // configuration + .filter(|_| !self.prune_modes.has_receipts_pruning()) + .and_then(|finalized_block_number| { self.get_static_file_target( highest_static_files.receipts, finalized_block_number, ) - }) - } else { - None - }, + }), + transactions: finalized_block_numbers.transactions.and_then(|finalized_block_number| { + self.get_static_file_target( + highest_static_files.transactions, + finalized_block_number, + ) + }), }; trace!( @@ -293,36 +313,69 @@ mod tests { StaticFileProducerInner::new(provider_factory.clone(), PruneModes::default()); let targets = static_file_producer - .get_static_file_targets(HighestStaticFiles 
{ receipts: Some(1) }) + .get_static_file_targets(HighestStaticFiles { + headers: Some(1), + receipts: Some(1), + transactions: Some(1), + }) .expect("get static file targets"); - assert_eq!(targets, StaticFileTargets { receipts: Some(0..=1) }); + assert_eq!( + targets, + StaticFileTargets { + headers: Some(0..=1), + receipts: Some(0..=1), + transactions: Some(0..=1) + } + ); assert_matches!(static_file_producer.run(targets), Ok(_)); assert_eq!( provider_factory.static_file_provider().get_highest_static_files(), - HighestStaticFiles { receipts: Some(1) } + HighestStaticFiles { headers: Some(1), receipts: Some(1), transactions: Some(1) } ); let targets = static_file_producer - .get_static_file_targets(HighestStaticFiles { receipts: Some(3) }) + .get_static_file_targets(HighestStaticFiles { + headers: Some(3), + receipts: Some(3), + transactions: Some(3), + }) .expect("get static file targets"); - assert_eq!(targets, StaticFileTargets { receipts: Some(2..=3) }); + assert_eq!( + targets, + StaticFileTargets { + headers: Some(2..=3), + receipts: Some(2..=3), + transactions: Some(2..=3) + } + ); assert_matches!(static_file_producer.run(targets), Ok(_)); assert_eq!( provider_factory.static_file_provider().get_highest_static_files(), - HighestStaticFiles { receipts: Some(3) } + HighestStaticFiles { headers: Some(3), receipts: Some(3), transactions: Some(3) } ); let targets = static_file_producer - .get_static_file_targets(HighestStaticFiles { receipts: Some(4) }) + .get_static_file_targets(HighestStaticFiles { + headers: Some(4), + receipts: Some(4), + transactions: Some(4), + }) .expect("get static file targets"); - assert_eq!(targets, StaticFileTargets { receipts: Some(4..=4) }); + assert_eq!( + targets, + StaticFileTargets { + headers: Some(4..=4), + receipts: Some(4..=4), + transactions: Some(4..=4) + } + ); assert_matches!( static_file_producer.run(targets), Err(ProviderError::BlockBodyIndicesNotFound(4)) ); assert_eq!( 
provider_factory.static_file_provider().get_highest_static_files(), - HighestStaticFiles { receipts: Some(3) } + HighestStaticFiles { headers: Some(3), receipts: Some(3), transactions: Some(3) } ); } @@ -346,7 +399,11 @@ mod tests { std::thread::sleep(Duration::from_millis(100)); } let targets = locked_producer - .get_static_file_targets(HighestStaticFiles { receipts: Some(1) }) + .get_static_file_targets(HighestStaticFiles { + headers: Some(1), + receipts: Some(1), + transactions: Some(1), + }) .expect("get static file targets"); assert_matches!(locked_producer.run(targets.clone()), Ok(_)); tx.send(targets).unwrap(); diff --git a/crates/static-file/types/src/lib.rs b/crates/static-file/types/src/lib.rs index 9606b0ec98b..53be4f6d1c1 100644 --- a/crates/static-file/types/src/lib.rs +++ b/crates/static-file/types/src/lib.rs @@ -27,15 +27,39 @@ pub const DEFAULT_BLOCKS_PER_STATIC_FILE: u64 = 500_000; /// Highest static file block numbers, per data segment. #[derive(Debug, Clone, Copy, Default, Eq, PartialEq)] pub struct HighestStaticFiles { + /// Highest static file block of headers, inclusive. + /// If [`None`], no static file is available. + pub headers: Option, /// Highest static file block of receipts, inclusive. /// If [`None`], no static file is available. pub receipts: Option, + /// Highest static file block of transactions, inclusive. + /// If [`None`], no static file is available. 
+ pub transactions: Option, } impl HighestStaticFiles { + /// Returns the highest static file if it exists for a segment + pub const fn highest(&self, segment: StaticFileSegment) -> Option { + match segment { + StaticFileSegment::Headers => self.headers, + StaticFileSegment::Transactions => self.transactions, + StaticFileSegment::Receipts => self.receipts, + } + } + + /// Returns a mutable reference to a static file segment + pub const fn as_mut(&mut self, segment: StaticFileSegment) -> &mut Option { + match segment { + StaticFileSegment::Headers => &mut self.headers, + StaticFileSegment::Transactions => &mut self.transactions, + StaticFileSegment::Receipts => &mut self.receipts, + } + } + /// Returns an iterator over all static file segments fn iter(&self) -> impl Iterator> { - [self.receipts].into_iter() + [self.headers, self.transactions, self.receipts].into_iter() } /// Returns the minimum block of all segments. @@ -52,28 +76,36 @@ impl HighestStaticFiles { /// Static File targets, per data segment, measured in [`BlockNumber`]. #[derive(Debug, Clone, Eq, PartialEq)] pub struct StaticFileTargets { + /// Targeted range of headers. + pub headers: Option>, /// Targeted range of receipts. pub receipts: Option>, + /// Targeted range of transactions. + pub transactions: Option>, } impl StaticFileTargets { /// Returns `true` if any of the targets are [Some]. pub const fn any(&self) -> bool { - self.receipts.is_some() + self.headers.is_some() || self.receipts.is_some() || self.transactions.is_some() } /// Returns `true` if all targets are either [`None`] or has beginning of the range equal to the /// highest static file. 
pub fn is_contiguous_to_highest_static_files(&self, static_files: HighestStaticFiles) -> bool { - core::iter::once(&(self.receipts.as_ref(), static_files.receipts)).all( - |(target_block_range, highest_static_file_block)| { - target_block_range.is_none_or(|target_block_range| { - *target_block_range.start() == - highest_static_file_block - .map_or(0, |highest_static_file_block| highest_static_file_block + 1) - }) - }, - ) + [ + (self.headers.as_ref(), static_files.headers), + (self.receipts.as_ref(), static_files.receipts), + (self.transactions.as_ref(), static_files.transactions), + ] + .iter() + .all(|(target_block_range, highest_static_file_block)| { + target_block_range.is_none_or(|target_block_range| { + *target_block_range.start() == + highest_static_file_block + .map_or(0, |highest_static_file_block| highest_static_file_block + 1) + }) + }) } } @@ -91,9 +123,42 @@ pub const fn find_fixed_range( mod tests { use super::*; + #[test] + fn test_highest_static_files_highest() { + let files = + HighestStaticFiles { headers: Some(100), receipts: Some(200), transactions: None }; + + // Test for headers segment + assert_eq!(files.highest(StaticFileSegment::Headers), Some(100)); + + // Test for receipts segment + assert_eq!(files.highest(StaticFileSegment::Receipts), Some(200)); + + // Test for transactions segment + assert_eq!(files.highest(StaticFileSegment::Transactions), None); + } + + #[test] + fn test_highest_static_files_as_mut() { + let mut files = HighestStaticFiles::default(); + + // Modify headers value + *files.as_mut(StaticFileSegment::Headers) = Some(150); + assert_eq!(files.headers, Some(150)); + + // Modify receipts value + *files.as_mut(StaticFileSegment::Receipts) = Some(250); + assert_eq!(files.receipts, Some(250)); + + // Modify transactions value + *files.as_mut(StaticFileSegment::Transactions) = Some(350); + assert_eq!(files.transactions, Some(350)); + } + #[test] fn test_highest_static_files_min() { - let files = HighestStaticFiles { receipts: 
Some(100) }; + let files = + HighestStaticFiles { headers: Some(300), receipts: Some(100), transactions: None }; // Minimum value among the available segments assert_eq!(files.min_block_num(), Some(100)); @@ -105,10 +170,11 @@ mod tests { #[test] fn test_highest_static_files_max() { - let files = HighestStaticFiles { receipts: Some(100) }; + let files = + HighestStaticFiles { headers: Some(300), receipts: Some(100), transactions: Some(500) }; // Maximum value among the available segments - assert_eq!(files.max_block_num(), Some(100)); + assert_eq!(files.max_block_num(), Some(500)); let empty_files = HighestStaticFiles::default(); // No values, should return None diff --git a/crates/storage/errors/src/lib.rs b/crates/storage/errors/src/lib.rs index eca6cd47a45..1a09d745140 100644 --- a/crates/storage/errors/src/lib.rs +++ b/crates/storage/errors/src/lib.rs @@ -21,5 +21,8 @@ pub mod lockfile; pub mod provider; pub use provider::{ProviderError, ProviderResult}; +/// Writer error +pub mod writer; + /// Any error pub mod any; diff --git a/crates/storage/errors/src/provider.rs b/crates/storage/errors/src/provider.rs index ed5230c18fb..9630a1b2a64 100644 --- a/crates/storage/errors/src/provider.rs +++ b/crates/storage/errors/src/provider.rs @@ -1,4 +1,4 @@ -use crate::{any::AnyError, db::DatabaseError}; +use crate::{any::AnyError, db::DatabaseError, writer::UnifiedStorageWriterError}; use alloc::{boxed::Box, string::String}; use alloy_eips::{BlockHashOrNumber, HashOrNumber}; use alloy_primitives::{Address, BlockHash, BlockNumber, TxNumber, B256}; @@ -125,6 +125,9 @@ pub enum ProviderError { /// Consistent view error. #[error("failed to initialize consistent view: {_0}")] ConsistentView(Box), + /// Storage writer error. + #[error(transparent)] + UnifiedStorageWriterError(#[from] UnifiedStorageWriterError), /// Received invalid output from configured storage implementation. 
#[error("received invalid output from storage")] InvalidStorageOutput, diff --git a/crates/storage/errors/src/writer.rs b/crates/storage/errors/src/writer.rs new file mode 100644 index 00000000000..52a5ba06e5e --- /dev/null +++ b/crates/storage/errors/src/writer.rs @@ -0,0 +1,24 @@ +use crate::db::DatabaseError; +use reth_static_file_types::StaticFileSegment; + +/// `UnifiedStorageWriter` related errors +#[derive(Clone, Debug, derive_more::Display, PartialEq, Eq, derive_more::Error)] +pub enum UnifiedStorageWriterError { + /// Database writer is missing + #[display("Database writer is missing")] + MissingDatabaseWriter, + /// Static file writer is missing + #[display("Static file writer is missing")] + MissingStaticFileWriter, + /// Static file writer is of wrong segment + #[display("Static file writer is of wrong segment: got {_0}, expected {_1}")] + IncorrectStaticFileWriter(StaticFileSegment, StaticFileSegment), + /// Database-related errors. + Database(DatabaseError), +} + +impl From for UnifiedStorageWriterError { + fn from(error: DatabaseError) -> Self { + Self::Database(error) + } +} diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index d066a704a24..76fa45f5a56 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -1062,7 +1062,9 @@ impl StaticFileProvider { /// Gets the highest static file block for all segments. 
pub fn get_highest_static_files(&self) -> HighestStaticFiles { HighestStaticFiles { + headers: self.get_highest_static_file_block(StaticFileSegment::Headers), receipts: self.get_highest_static_file_block(StaticFileSegment::Receipts), + transactions: self.get_highest_static_file_block(StaticFileSegment::Transactions), } } diff --git a/crates/tracing-otlp/Cargo.toml b/crates/tracing-otlp/Cargo.toml index 5b01095d4ff..60cee0aa229 100644 --- a/crates/tracing-otlp/Cargo.toml +++ b/crates/tracing-otlp/Cargo.toml @@ -12,14 +12,13 @@ exclude.workspace = true # obs opentelemetry_sdk = { workspace = true, optional = true } opentelemetry = { workspace = true, optional = true } -opentelemetry-otlp = { workspace = true, optional = true, features = ["grpc-tonic"] } +opentelemetry-otlp = { workspace = true, optional = true } opentelemetry-semantic-conventions = { workspace = true, optional = true } tracing-opentelemetry = { workspace = true, optional = true } tracing-subscriber.workspace = true tracing.workspace = true # misc -clap = { workspace = true, features = ["derive"] } eyre.workspace = true url.workspace = true diff --git a/crates/tracing-otlp/src/lib.rs b/crates/tracing-otlp/src/lib.rs index 2cfd332a408..07415ac2a65 100644 --- a/crates/tracing-otlp/src/lib.rs +++ b/crates/tracing-otlp/src/lib.rs @@ -6,8 +6,7 @@ //! applications. It allows for easily capturing and exporting distributed traces to compatible //! backends like Jaeger, Zipkin, or any other OpenTelemetry-compatible tracing system. -use clap::ValueEnum; -use eyre::ensure; +use eyre::{ensure, WrapErr}; use opentelemetry::{global, trace::TracerProvider, KeyValue, Value}; use opentelemetry_otlp::{SpanExporter, WithExportConfig}; use opentelemetry_sdk::{ @@ -21,10 +20,6 @@ use tracing_opentelemetry::OpenTelemetryLayer; use tracing_subscriber::registry::LookupSpan; use url::Url; -// Otlp http endpoint is expected to end with this path. -// See also . 
-const HTTP_TRACE_ENDPOINT: &str = "/v1/traces"; - /// Creates a tracing [`OpenTelemetryLayer`] that exports spans to an OTLP endpoint. /// /// This layer can be added to a [`tracing_subscriber::Registry`] to enable `OpenTelemetry` tracing @@ -32,7 +27,6 @@ const HTTP_TRACE_ENDPOINT: &str = "/v1/traces"; pub fn span_layer( service_name: impl Into, endpoint: &Url, - protocol: OtlpProtocol, ) -> eyre::Result> where for<'span> S: Subscriber + LookupSpan<'span>, @@ -41,12 +35,8 @@ where let resource = build_resource(service_name); - let span_builder = SpanExporter::builder(); - - let span_exporter = match protocol { - OtlpProtocol::Http => span_builder.with_http().with_endpoint(endpoint.as_str()).build()?, - OtlpProtocol::Grpc => span_builder.with_tonic().with_endpoint(endpoint.as_str()).build()?, - }; + let span_exporter = + SpanExporter::builder().with_http().with_endpoint(endpoint.to_string()).build()?; let tracer_provider = SdkTracerProvider::builder() .with_resource(resource) @@ -55,7 +45,7 @@ where global::set_tracer_provider(tracer_provider.clone()); - let tracer = tracer_provider.tracer("reth"); + let tracer = tracer_provider.tracer("reth-otlp"); Ok(tracing_opentelemetry::layer().with_tracer(tracer)) } @@ -67,37 +57,34 @@ fn build_resource(service_name: impl Into) -> Resource { .build() } -/// OTLP transport protocol type -#[derive(Debug, Clone, Copy, PartialEq, Eq, ValueEnum)] -pub enum OtlpProtocol { - /// HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - Http, - /// gRPC transport, port 4317 - Grpc, +/// Destination for exported trace spans. +#[derive(Debug, Clone)] +pub enum TraceOutput { + /// Export traces as JSON to stdout. + Stdout, + /// Export traces to an OTLP collector at the specified URL. + Otlp(Url), } -impl OtlpProtocol { - /// Validate and correct the URL to match protocol requirements. +impl TraceOutput { + /// Parses the trace output destination from a string. 
/// - /// For HTTP: Ensures the path ends with `/v1/traces`, appending it if necessary. - /// For gRPC: Ensures the path does NOT include `/v1/traces`. - pub fn validate_endpoint(&self, url: &mut Url) -> eyre::Result<()> { - match self { - Self::Http => { - if !url.path().ends_with(HTTP_TRACE_ENDPOINT) { - let path = url.path().trim_end_matches('/'); - url.set_path(&format!("{}{}", path, HTTP_TRACE_ENDPOINT)); - } - } - Self::Grpc => { - ensure!( - !url.path().ends_with(HTTP_TRACE_ENDPOINT), - "OTLP gRPC endpoint should not include {} path, got: {}", - HTTP_TRACE_ENDPOINT, - url - ); - } + /// Returns `TraceOutput::Stdout` for "stdout", or `TraceOutput::Otlp` for valid OTLP URLs. + /// OTLP URLs must end with `/v1/traces` per the OTLP specification. + pub fn parse(s: &str) -> eyre::Result { + if s == "stdout" { + return Ok(Self::Stdout); } - Ok(()) + + let url = Url::parse(s).wrap_err("Invalid URL for trace output")?; + + // OTLP specification requires the `/v1/traces` path for trace endpoints + ensure!( + url.path().ends_with("/v1/traces"), + "OTLP trace endpoint must end with /v1/traces, got path: {}", + url.path() + ); + + Ok(Self::Otlp(url)) } } diff --git a/crates/tracing/src/layers.rs b/crates/tracing/src/layers.rs index 660d40ae464..156bd8c8253 100644 --- a/crates/tracing/src/layers.rs +++ b/crates/tracing/src/layers.rs @@ -1,4 +1,6 @@ use crate::formatter::LogFormat; +#[cfg(feature = "otlp")] +use reth_tracing_otlp::span_layer; use rolling_file::{RollingConditionBasic, RollingFileAppender}; use std::{ fmt, @@ -6,11 +8,6 @@ use std::{ }; use tracing_appender::non_blocking::WorkerGuard; use tracing_subscriber::{filter::Directive, EnvFilter, Layer, Registry}; -#[cfg(feature = "otlp")] -use { - reth_tracing_otlp::{span_layer, OtlpProtocol}, - url::Url, -}; /// A worker guard returned by the file layer. /// @@ -21,9 +18,8 @@ pub type FileWorkerGuard = tracing_appender::non_blocking::WorkerGuard; /// A boxed tracing [Layer]. 
pub(crate) type BoxedLayer = Box + Send + Sync>; -/// Default [directives](Directive) for [`EnvFilter`] which disable high-frequency debug logs from -/// dependencies such as `hyper`, `hickory-resolver`, `hickory_proto`, `discv5`, `jsonrpsee-server`, -/// the `opentelemetry_*` crates, and `hyper_util::client::legacy::pool`. +/// Default [directives](Directive) for [`EnvFilter`] which disables high-frequency debug logs from +/// `hyper`, `hickory-resolver`, `jsonrpsee-server`, and `discv5`. const DEFAULT_ENV_FILTER_DIRECTIVES: [&str; 9] = [ "hyper::proto::h1=off", "hickory_resolver=off", @@ -137,13 +133,12 @@ impl Layers { pub fn with_span_layer( &mut self, service_name: String, - endpoint_exporter: Url, + endpoint_exporter: url::Url, filter: EnvFilter, - otlp_protocol: OtlpProtocol, ) -> eyre::Result<()> { // Create the span provider - let span_layer = span_layer(service_name, &endpoint_exporter, otlp_protocol) + let span_layer = span_layer(service_name, &endpoint_exporter) .map_err(|e| eyre::eyre!("Failed to build OTLP span exporter {}", e))? .with_filter(filter); diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index 18e93dc26a4..6525500a2a2 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -51,7 +51,6 @@ use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; use reth_trie_sparse::provider::{RevealedNode, TrieNodeProvider, TrieNodeProviderFactory}; use std::{ sync::{ - atomic::{AtomicUsize, Ordering}, mpsc::{channel, Receiver, Sender}, Arc, }, @@ -117,7 +116,6 @@ fn storage_worker_loop( task_ctx: ProofTaskCtx, work_rx: CrossbeamReceiver, worker_id: usize, - available_workers: Arc, #[cfg(feature = "metrics")] metrics: ProofTaskTrieMetrics, ) where Factory: DatabaseProviderFactory, @@ -146,13 +144,7 @@ fn storage_worker_loop( let mut storage_proofs_processed = 0u64; let mut storage_nodes_processed = 0u64; - // Initially mark this worker as available. 
- available_workers.fetch_add(1, Ordering::Relaxed); - while let Ok(job) = work_rx.recv() { - // Mark worker as busy. - available_workers.fetch_sub(1, Ordering::Relaxed); - match job { StorageWorkerJob::StorageProof { input, result_sender } => { let hashed_address = input.hashed_address; @@ -194,9 +186,6 @@ fn storage_worker_loop( total_processed = storage_proofs_processed, "Storage proof completed" ); - - // Mark worker as available again. - available_workers.fetch_add(1, Ordering::Relaxed); } StorageWorkerJob::BlindedStorageNode { account, path, result_sender } => { @@ -235,9 +224,6 @@ fn storage_worker_loop( total_processed = storage_nodes_processed, "Blinded storage node completed" ); - - // Mark worker as available again. - available_workers.fetch_add(1, Ordering::Relaxed); } } } @@ -258,9 +244,11 @@ fn storage_worker_loop( /// /// # Lifecycle /// -/// Each worker initializes its providers, advertises availability, then loops: -/// receive an account job, mark busy, process the work, respond, and mark available again. -/// The loop ends gracefully once the channel closes. +/// Each worker: +/// 1. Receives `AccountWorkerJob` from crossbeam unbounded channel +/// 2. Computes result using its dedicated long-lived transaction +/// 3. Sends result directly to original caller via `std::mpsc` +/// 4. Repeats until channel closes (graceful shutdown) /// /// # Transaction Reuse /// @@ -281,7 +269,6 @@ fn account_worker_loop( work_rx: CrossbeamReceiver, storage_work_tx: CrossbeamSender, worker_id: usize, - available_workers: Arc, #[cfg(feature = "metrics")] metrics: ProofTaskTrieMetrics, ) where Factory: DatabaseProviderFactory, @@ -310,13 +297,7 @@ fn account_worker_loop( let mut account_proofs_processed = 0u64; let mut account_nodes_processed = 0u64; - // Count this worker as available only after successful initialization. - available_workers.fetch_add(1, Ordering::Relaxed); - while let Ok(job) = work_rx.recv() { - // Mark worker as busy. 
- available_workers.fetch_sub(1, Ordering::Relaxed); - match job { AccountWorkerJob::AccountMultiproof { mut input, result_sender } => { let span = tracing::debug_span!( @@ -400,9 +381,6 @@ fn account_worker_loop( "Account multiproof completed" ); drop(_span_guard); - - // Mark worker as available again. - available_workers.fetch_add(1, Ordering::Relaxed); } AccountWorkerJob::BlindedAccountNode { path, result_sender } => { @@ -442,9 +420,6 @@ fn account_worker_loop( "Blinded account node completed" ); drop(_span_guard); - - // Mark worker as available again. - available_workers.fetch_add(1, Ordering::Relaxed); } } } @@ -891,12 +866,6 @@ pub struct ProofWorkerHandle { storage_work_tx: CrossbeamSender, /// Direct sender to account worker pool account_work_tx: CrossbeamSender, - /// Counter tracking available storage workers. Workers decrement when starting work, - /// increment when finishing. Used to determine whether to chunk multiproofs. - storage_available_workers: Arc, - /// Counter tracking available account workers. Workers decrement when starting work, - /// increment when finishing. Used to determine whether to chunk multiproofs. - account_available_workers: Arc, } impl ProofWorkerHandle { @@ -924,11 +893,6 @@ impl ProofWorkerHandle { let (storage_work_tx, storage_work_rx) = unbounded::(); let (account_work_tx, account_work_rx) = unbounded::(); - // Initialize availability counters at zero. Each worker will increment when it - // successfully initializes, ensuring only healthy workers are counted. 
- let storage_available_workers = Arc::new(AtomicUsize::new(0)); - let account_available_workers = Arc::new(AtomicUsize::new(0)); - tracing::debug!( target: "trie::proof_task", storage_worker_count, @@ -946,7 +910,6 @@ impl ProofWorkerHandle { let view_clone = view.clone(); let task_ctx_clone = task_ctx.clone(); let work_rx_clone = storage_work_rx.clone(); - let storage_available_workers_clone = storage_available_workers.clone(); executor.spawn_blocking(move || { #[cfg(feature = "metrics")] @@ -958,7 +921,6 @@ impl ProofWorkerHandle { task_ctx_clone, work_rx_clone, worker_id, - storage_available_workers_clone, #[cfg(feature = "metrics")] metrics, ) @@ -984,7 +946,6 @@ impl ProofWorkerHandle { let task_ctx_clone = task_ctx.clone(); let work_rx_clone = account_work_rx.clone(); let storage_work_tx_clone = storage_work_tx.clone(); - let account_available_workers_clone = account_available_workers.clone(); executor.spawn_blocking(move || { #[cfg(feature = "metrics")] @@ -997,7 +958,6 @@ impl ProofWorkerHandle { work_rx_clone, storage_work_tx_clone, worker_id, - account_available_workers_clone, #[cfg(feature = "metrics")] metrics, ) @@ -1012,12 +972,7 @@ impl ProofWorkerHandle { drop(_guard); - Self::new_handle( - storage_work_tx, - account_work_tx, - storage_available_workers, - account_available_workers, - ) + Self::new_handle(storage_work_tx, account_work_tx) } /// Creates a new [`ProofWorkerHandle`] with direct access to worker pools. @@ -1026,35 +981,8 @@ impl ProofWorkerHandle { const fn new_handle( storage_work_tx: CrossbeamSender, account_work_tx: CrossbeamSender, - storage_available_workers: Arc, - account_available_workers: Arc, ) -> Self { - Self { - storage_work_tx, - account_work_tx, - storage_available_workers, - account_available_workers, - } - } - - /// Returns true if there are available storage workers to process tasks. 
- pub fn has_available_storage_workers(&self) -> bool { - self.storage_available_workers.load(Ordering::Relaxed) > 0 - } - - /// Returns true if there are available account workers to process tasks. - pub fn has_available_account_workers(&self) -> bool { - self.account_available_workers.load(Ordering::Relaxed) > 0 - } - - /// Returns the number of pending storage tasks in the queue. - pub fn pending_storage_tasks(&self) -> usize { - self.storage_work_tx.len() - } - - /// Returns the number of pending account tasks in the queue. - pub fn pending_account_tasks(&self) -> usize { - self.account_work_tx.len() + Self { storage_work_tx, account_work_tx } } /// Dispatch a storage proof computation to storage worker pool diff --git a/crates/trie/sparse-parallel/src/lower.rs b/crates/trie/sparse-parallel/src/lower.rs index bc8ae006074..b5454dd3970 100644 --- a/crates/trie/sparse-parallel/src/lower.rs +++ b/crates/trie/sparse-parallel/src/lower.rs @@ -122,26 +122,4 @@ impl LowerSparseSubtrie { Self::Blind(None) => 0, } } - - /// Shrinks the capacity of the subtrie's node storage. - /// Works for both revealed and blind tries with allocated storage. - pub(crate) fn shrink_nodes_to(&mut self, size: usize) { - match self { - Self::Revealed(trie) | Self::Blind(Some(trie)) => { - trie.shrink_nodes_to(size); - } - Self::Blind(None) => {} - } - } - - /// Shrinks the capacity of the subtrie's value storage. - /// Works for both revealed and blind tries with allocated storage. 
- pub(crate) fn shrink_values_to(&mut self, size: usize) { - match self { - Self::Revealed(trie) | Self::Blind(Some(trie)) => { - trie.shrink_values_to(size); - } - Self::Blind(None) => {} - } - } } diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index 34c1ff2a963..5e5a838f414 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -883,42 +883,6 @@ impl SparseTrieInterface for ParallelSparseTrie { self.upper_subtrie.value_capacity() + self.lower_subtries.iter().map(|trie| trie.value_capacity()).sum::() } - - fn shrink_nodes_to(&mut self, size: usize) { - // Distribute the capacity across upper and lower subtries - // - // Always include upper subtrie, plus any lower subtries - let total_subtries = 1 + NUM_LOWER_SUBTRIES; - let size_per_subtrie = size / total_subtries; - - // Shrink the upper subtrie - self.upper_subtrie.shrink_nodes_to(size_per_subtrie); - - // Shrink lower subtries (works for both revealed and blind with allocation) - for subtrie in &mut self.lower_subtries { - subtrie.shrink_nodes_to(size_per_subtrie); - } - - // shrink masks maps - self.branch_node_hash_masks.shrink_to(size); - self.branch_node_tree_masks.shrink_to(size); - } - - fn shrink_values_to(&mut self, size: usize) { - // Distribute the capacity across upper and lower subtries - // - // Always include upper subtrie, plus any lower subtries - let total_subtries = 1 + NUM_LOWER_SUBTRIES; - let size_per_subtrie = size / total_subtries; - - // Shrink the upper subtrie - self.upper_subtrie.shrink_values_to(size_per_subtrie); - - // Shrink lower subtries (works for both revealed and blind with allocation) - for subtrie in &mut self.lower_subtries { - subtrie.shrink_values_to(size_per_subtrie); - } - } } impl ParallelSparseTrie { @@ -2147,16 +2111,6 @@ impl SparseSubtrie { pub(crate) fn value_capacity(&self) -> usize { self.inner.value_capacity() } - - /// Shrinks the capacity of the subtrie's node 
storage. - pub(crate) fn shrink_nodes_to(&mut self, size: usize) { - self.nodes.shrink_to(size); - } - - /// Shrinks the capacity of the subtrie's value storage. - pub(crate) fn shrink_values_to(&mut self, size: usize) { - self.inner.values.shrink_to(size); - } } /// Helper type for [`SparseSubtrie`] to mutably access only a subset of fields from the original @@ -2617,19 +2571,10 @@ impl SparseSubtrieBuffers { /// Clears all buffers. fn clear(&mut self) { self.path_stack.clear(); - self.path_stack.shrink_to_fit(); - self.rlp_node_stack.clear(); - self.rlp_node_stack.shrink_to_fit(); - self.branch_child_buf.clear(); - self.branch_child_buf.shrink_to_fit(); - self.branch_value_stack_buf.clear(); - self.branch_value_stack_buf.shrink_to_fit(); - self.rlp_buf.clear(); - self.rlp_buf.shrink_to_fit(); } } diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index a202ebc8b2b..aef552da3dd 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -43,32 +43,6 @@ where Self(trie) } - /// Shrink the cleared sparse trie's capacity to the given node and value size. - /// This helps reduce memory usage when the trie has excess capacity. - /// The capacity is distributed equally across the account trie and all storage tries. 
- pub fn shrink_to(&mut self, node_size: usize, value_size: usize) { - // Count total number of storage tries (active + cleared + default) - let storage_tries_count = self.0.storage.tries.len() + self.0.storage.cleared_tries.len(); - - // Total tries = 1 account trie + all storage tries - let total_tries = 1 + storage_tries_count; - - // Distribute capacity equally among all tries - let node_size_per_trie = node_size / total_tries; - let value_size_per_trie = value_size / total_tries; - - // Shrink the account trie - self.0.state.shrink_nodes_to(node_size_per_trie); - self.0.state.shrink_values_to(value_size_per_trie); - - // Give storage tries the remaining capacity after account trie allocation - let storage_node_size = node_size.saturating_sub(node_size_per_trie); - let storage_value_size = value_size.saturating_sub(value_size_per_trie); - - // Shrink all storage tries (they will redistribute internally) - self.0.storage.shrink_to(storage_node_size, storage_value_size); - } - /// Returns the cleared [`SparseStateTrie`], consuming this instance. pub fn into_inner(self) -> SparseStateTrie { self.0 @@ -886,31 +860,6 @@ impl StorageTries { set })); } - - /// Shrinks the capacity of all storage tries (active, cleared, and default) to the given sizes. - /// The capacity is distributed equally among all tries that have allocations. 
- fn shrink_to(&mut self, node_size: usize, value_size: usize) { - // Count total number of tries with capacity (active + cleared + default) - let active_count = self.tries.len(); - let cleared_count = self.cleared_tries.len(); - let total_tries = 1 + active_count + cleared_count; - - // Distribute capacity equally among all tries - let node_size_per_trie = node_size / total_tries; - let value_size_per_trie = value_size / total_tries; - - // Shrink active storage tries - for trie in self.tries.values_mut() { - trie.shrink_nodes_to(node_size_per_trie); - trie.shrink_values_to(value_size_per_trie); - } - - // Shrink cleared storage tries - for trie in &mut self.cleared_tries { - trie.shrink_nodes_to(node_size_per_trie); - trie.shrink_values_to(value_size_per_trie); - } - } } impl StorageTries { diff --git a/crates/trie/sparse/src/traits.rs b/crates/trie/sparse/src/traits.rs index 5b7b6193f96..8fdbb78d876 100644 --- a/crates/trie/sparse/src/traits.rs +++ b/crates/trie/sparse/src/traits.rs @@ -228,14 +228,6 @@ pub trait SparseTrieInterface: Sized + Debug + Send + Sync { /// This returns the capacity of any inner data structures which store leaf values. fn value_capacity(&self) -> usize; - - /// Shrink the capacity of the sparse trie's node storage to the given size. - /// This will reduce memory usage if the current capacity is higher than the given size. - fn shrink_nodes_to(&mut self, size: usize); - - /// Shrink the capacity of the sparse trie's value storage to the given size. - /// This will reduce memory usage if the current capacity is higher than the given size. - fn shrink_values_to(&mut self, size: usize); } /// Struct for passing around branch node mask information. 
diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 8500ea400b5..737da842254 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -275,28 +275,6 @@ impl SparseTrie { _ => 0, } } - - /// Shrinks the capacity of the sparse trie's node storage. - /// Works for both revealed and blind tries with allocated storage. - pub fn shrink_nodes_to(&mut self, size: usize) { - match self { - Self::Blind(Some(trie)) | Self::Revealed(trie) => { - trie.shrink_nodes_to(size); - } - _ => {} - } - } - - /// Shrinks the capacity of the sparse trie's value storage. - /// Works for both revealed and blind tries with allocated storage. - pub fn shrink_values_to(&mut self, size: usize) { - match self { - Self::Blind(Some(trie)) | Self::Revealed(trie) => { - trie.shrink_values_to(size); - } - _ => {} - } - } } /// The representation of revealed sparse trie. @@ -1110,16 +1088,6 @@ impl SparseTrieInterface for SerialSparseTrie { fn value_capacity(&self) -> usize { self.values.capacity() } - - fn shrink_nodes_to(&mut self, size: usize) { - self.nodes.shrink_to(size); - self.branch_node_tree_masks.shrink_to(size); - self.branch_node_hash_masks.shrink_to(size); - } - - fn shrink_values_to(&mut self, size: usize) { - self.values.shrink_to(size); - } } impl SerialSparseTrie { diff --git a/docs/vocs/docs/pages/cli/reth.mdx b/docs/vocs/docs/pages/cli/reth.mdx index c35216d6b5c..041d494523c 100644 --- a/docs/vocs/docs/pages/cli/reth.mdx +++ b/docs/vocs/docs/pages/cli/reth.mdx @@ -116,28 +116,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. 
Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/config.mdx b/docs/vocs/docs/pages/cli/reth/config.mdx index 6b3c9e4b657..96bdcf7a98c 100644 --- a/docs/vocs/docs/pages/cli/reth/config.mdx +++ b/docs/vocs/docs/pages/cli/reth/config.mdx @@ -102,28 +102,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. 
This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db.mdx b/docs/vocs/docs/pages/cli/reth/db.mdx index a7bda7c3da7..f2a49420837 100644 --- a/docs/vocs/docs/pages/cli/reth/db.mdx +++ b/docs/vocs/docs/pages/cli/reth/db.mdx @@ -167,28 +167,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx index 4b8b8ca2cce..c86273aacf4 100644 --- a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx @@ -119,28 +119,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. 
- If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/clear.mdx b/docs/vocs/docs/pages/cli/reth/db/clear.mdx index 1548558fe39..88fd92763f8 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear.mdx @@ -111,28 +111,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. 
- - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx index b48ba180982..c467fe9d3dd 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx @@ -110,28 +110,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. 
diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx index 9f22178ec4c..d4b59a05223 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx @@ -113,28 +113,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/diff.mdx b/docs/vocs/docs/pages/cli/reth/db/diff.mdx index fe7dd7d0bae..4bb81ac07c9 100644 --- a/docs/vocs/docs/pages/cli/reth/db/diff.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/diff.mdx @@ -146,28 +146,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. 
- If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/drop.mdx b/docs/vocs/docs/pages/cli/reth/db/drop.mdx index c778320f2d8..c75a889458b 100644 --- a/docs/vocs/docs/pages/cli/reth/db/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/drop.mdx @@ -109,28 +109,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. 
- - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/get.mdx b/docs/vocs/docs/pages/cli/reth/db/get.mdx index dfcfcac1886..8c20c7e311a 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get.mdx @@ -111,28 +111,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. 
diff --git a/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx b/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx index 981d0c9f9a5..3b8df2f3a4f 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx @@ -119,28 +119,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx index 8e045a4cdf1..3980903c65d 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx @@ -119,28 +119,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. 
- If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/list.mdx b/docs/vocs/docs/pages/cli/reth/db/list.mdx index 3be1cd183b2..16131a95a17 100644 --- a/docs/vocs/docs/pages/cli/reth/db/list.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/list.mdx @@ -152,28 +152,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. 
- - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/path.mdx b/docs/vocs/docs/pages/cli/reth/db/path.mdx index a954093dd5d..0c09f5be69b 100644 --- a/docs/vocs/docs/pages/cli/reth/db/path.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/path.mdx @@ -106,28 +106,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. 
diff --git a/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx b/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx index 6436afc2133..9c08ff331ed 100644 --- a/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx @@ -109,28 +109,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/stats.mdx b/docs/vocs/docs/pages/cli/reth/db/stats.mdx index 5bd316847c0..47695e1b22a 100644 --- a/docs/vocs/docs/pages/cli/reth/db/stats.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/stats.mdx @@ -119,28 +119,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. 
- If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/version.mdx b/docs/vocs/docs/pages/cli/reth/db/version.mdx index c87496d910d..7611b69946d 100644 --- a/docs/vocs/docs/pages/cli/reth/db/version.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/version.mdx @@ -106,28 +106,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. 
- - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/download.mdx b/docs/vocs/docs/pages/cli/reth/download.mdx index f8f1c199de5..b18faa93205 100644 --- a/docs/vocs/docs/pages/cli/reth/download.mdx +++ b/docs/vocs/docs/pages/cli/reth/download.mdx @@ -164,28 +164,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. 
diff --git a/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx b/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx index 7aeaa8db49a..bf5b0ac534c 100644 --- a/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx +++ b/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx @@ -105,28 +105,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/export-era.mdx b/docs/vocs/docs/pages/cli/reth/export-era.mdx index da732cda33b..cd413c12841 100644 --- a/docs/vocs/docs/pages/cli/reth/export-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/export-era.mdx @@ -170,28 +170,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. 
- If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/import-era.mdx b/docs/vocs/docs/pages/cli/reth/import-era.mdx index 77afcd5a6b3..7d62409a638 100644 --- a/docs/vocs/docs/pages/cli/reth/import-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/import-era.mdx @@ -165,28 +165,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. 
- - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/import.mdx b/docs/vocs/docs/pages/cli/reth/import.mdx index 405009c6071..8e3e1cdb0a2 100644 --- a/docs/vocs/docs/pages/cli/reth/import.mdx +++ b/docs/vocs/docs/pages/cli/reth/import.mdx @@ -166,28 +166,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. 
diff --git a/docs/vocs/docs/pages/cli/reth/init-state.mdx b/docs/vocs/docs/pages/cli/reth/init-state.mdx index 2ef6fdbe838..49c0e098098 100644 --- a/docs/vocs/docs/pages/cli/reth/init-state.mdx +++ b/docs/vocs/docs/pages/cli/reth/init-state.mdx @@ -186,28 +186,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/init.mdx b/docs/vocs/docs/pages/cli/reth/init.mdx index 51dc401d567..ac1c7ff254b 100644 --- a/docs/vocs/docs/pages/cli/reth/init.mdx +++ b/docs/vocs/docs/pages/cli/reth/init.mdx @@ -154,28 +154,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. 
- If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index 48b1c75c591..3fc6988dc69 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -743,25 +743,25 @@ Pruning: --full Run full node. Only the most recent [`MINIMUM_PRUNING_DISTANCE`] block states are stored - --prune.block-interval + --block-interval Minimum pruning interval measured in blocks - --prune.sender-recovery.full + --prune.senderrecovery.full Prunes all sender recovery data - --prune.sender-recovery.distance + --prune.senderrecovery.distance Prune sender recovery data before the `head-N` block number. In other words, keep last N + 1 blocks - --prune.sender-recovery.before + --prune.senderrecovery.before Prune sender recovery data before the specified block number. 
The specified block number is not pruned - --prune.transaction-lookup.full + --prune.transactionlookup.full Prunes all transaction lookup data - --prune.transaction-lookup.distance + --prune.transactionlookup.distance Prune transaction lookup data before the `head-N` block number. In other words, keep last N + 1 blocks - --prune.transaction-lookup.before + --prune.transactionlookup.before Prune transaction lookup data before the specified block number. The specified block number is not pruned --prune.receipts.full @@ -776,22 +776,22 @@ Pruning: --prune.receipts.before Prune receipts before the specified block number. The specified block number is not pruned - --prune.account-history.full + --prune.accounthistory.full Prunes all account history - --prune.account-history.distance + --prune.accounthistory.distance Prune account before the `head-N` block number. In other words, keep last N + 1 blocks - --prune.account-history.before + --prune.accounthistory.before Prune account history before the specified block number. The specified block number is not pruned - --prune.storage-history.full + --prune.storagehistory.full Prunes all storage history data - --prune.storage-history.distance + --prune.storagehistory.distance Prune storage history before the `head-N` block number. In other words, keep last N + 1 blocks - --prune.storage-history.before + --prune.storagehistory.before Prune storage history before the specified block number. The specified block number is not pruned --prune.bodies.pre-merge @@ -993,28 +993,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. 
Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/p2p.mdx b/docs/vocs/docs/pages/cli/reth/p2p.mdx index 7b37fdfdaa3..b81c00a0382 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p.mdx @@ -103,28 +103,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. 
This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx index bbe6b375e5b..fd28a37ebb1 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx @@ -323,28 +323,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx index 324b01daac5..63baa86d367 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx @@ -114,28 +114,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. 
- If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx index 533bd71de2e..f9f94497547 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx @@ -323,28 +323,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. 
- - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx index a8ac7fbd0df..78d6dd8d3ba 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx @@ -100,28 +100,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. 
diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx index 2d136630298..2089c92461e 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx @@ -100,28 +100,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/prune.mdx b/docs/vocs/docs/pages/cli/reth/prune.mdx index 8dfd3003816..8f5828e8a67 100644 --- a/docs/vocs/docs/pages/cli/reth/prune.mdx +++ b/docs/vocs/docs/pages/cli/reth/prune.mdx @@ -154,28 +154,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. 
- If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/re-execute.mdx b/docs/vocs/docs/pages/cli/reth/re-execute.mdx index b7371fa4cf6..56a7e3558c4 100644 --- a/docs/vocs/docs/pages/cli/reth/re-execute.mdx +++ b/docs/vocs/docs/pages/cli/reth/re-execute.mdx @@ -167,28 +167,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. 
- - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/stage.mdx b/docs/vocs/docs/pages/cli/reth/stage.mdx index 006c6c74340..822f0f0c2db 100644 --- a/docs/vocs/docs/pages/cli/reth/stage.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage.mdx @@ -103,28 +103,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. 
diff --git a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx index 19e813bec22..037495979a0 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx @@ -169,28 +169,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx index 20cf8660bf1..8484379fe36 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx @@ -161,28 +161,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. 
- If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx index 70fad94ea3a..079804ff088 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx @@ -118,28 +118,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. 
- - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx index bed5d33329a..7aee318e1ac 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx @@ -118,28 +118,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. 
diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx index 3bada103c87..17b2b7c9515 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx @@ -118,28 +118,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx index 723a54e9272..de64aa51c33 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx @@ -118,28 +118,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. 
- If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/stage/run.mdx b/docs/vocs/docs/pages/cli/reth/stage/run.mdx index ae57239c9d3..5407938072f 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/run.mdx @@ -390,28 +390,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. 
- - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx index a7581b22b3f..2d2f94d6801 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx @@ -162,28 +162,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. 
diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx index b04e1920b75..a376af84012 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx @@ -110,28 +110,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. - If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx index 2c22f8127c1..ce62c643600 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx @@ -110,28 +110,14 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. 
- If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-protocol - OTLP transport protocol to use for exporting traces. - - - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path - - Defaults to HTTP if not specified. - - Possible values: - - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path - - grpc: gRPC transport, port 4317 - - [env: OTEL_EXPORTER_OTLP_PROTOCOL=] - [default: http] - --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/examples/custom-node/src/pool.rs b/examples/custom-node/src/pool.rs index 8828803a0f3..0959b3bcae0 100644 --- a/examples/custom-node/src/pool.rs +++ b/examples/custom-node/src/pool.rs @@ -17,7 +17,7 @@ pub enum CustomPooledTransaction { /// A regular Optimism transaction as defined by [`OpPooledTransaction`]. #[envelope(flatten)] Op(OpPooledTransaction), - /// A [`TxPayment`] tagged with type 0x2A (decimal 42). + /// A [`TxPayment`] tagged with type 0x2A (decimal 42). 
#[envelope(ty = 42)] Payment(Signed), } From 343695b4c51b3ec70f4e2b873d7be07b1f605230 Mon Sep 17 00:00:00 2001 From: Ishika Choudhury <117741714+Rimeeeeee@users.noreply.github.com> Date: Fri, 24 Oct 2025 12:48:28 +0530 Subject: [PATCH 23/23] fixes --- crates/engine/invalid-block-hooks/src/witness.rs | 3 ++- crates/engine/tree/src/tree/metrics.rs | 3 ++- crates/engine/tree/src/tree/payload_validator.rs | 3 ++- crates/engine/util/src/reorg.rs | 3 ++- crates/ethereum/payload/src/lib.rs | 3 ++- crates/evm/evm/src/execute.rs | 3 ++- crates/optimism/flashblocks/src/worker.rs | 3 ++- crates/optimism/payload/src/builder.rs | 3 ++- crates/rpc/rpc-eth-api/src/helpers/call.rs | 3 ++- crates/rpc/rpc-eth-api/src/helpers/pending_block.rs | 3 ++- 10 files changed, 20 insertions(+), 10 deletions(-) diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs index 19a2691d76d..b2c39af66c4 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -539,7 +539,8 @@ mod tests { )) .with_bundle_update() .build(), - ); + ) + .with_bal_builder(); // Insert contracts from the fixture into the state cache for (code_hash, bytecode) in &bundle_state.contracts { diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index 3a4f85fcd2d..43bd9f7c64a 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -307,7 +307,8 @@ mod tests { .with_bundle_update() .without_state_clear() .build(), - ); + ) + .with_bal_builder(); let evm = EthEvm::new( Context::mainnet().with_db(db).build_mainnet_with_inspector(NoOpInspector {}), false, diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index 084688a90bf..b1c0e79f7bb 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -631,7 +631,8 @@ 
where .with_bundle_update() .without_state_clear() .build(), - ); + ) + .with_bal_builder(); let evm = self.evm_config.evm_with_env(&mut db, env.evm_env.clone()); let ctx = diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index 263a08b4f86..200619c9857 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -288,7 +288,8 @@ where .with_database_ref(StateProviderDatabase::new(&state_provider)) .with_bundle_update() .build(), - ); + ) + .with_bal_builder(); let ctx = evm_config.context_for_block(&reorg_target).map_err(RethError::other)?; let evm = evm_config.evm_for_block(&mut state, &reorg_target).map_err(RethError::other)?; diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 9e9063002c0..85912e38b0f 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -159,7 +159,8 @@ where let state = StateProviderDatabase::new(&state_provider); let mut db = BalDatabase::new( State::builder().with_database(cached_reads.as_db_mut(state)).with_bundle_update().build(), - ); + ) + .with_bal_builder(); let mut builder = evm_config .builder_for_next_block( diff --git a/crates/evm/evm/src/execute.rs b/crates/evm/evm/src/execute.rs index 2685f961ce6..1b1f97c1709 100644 --- a/crates/evm/evm/src/execute.rs +++ b/crates/evm/evm/src/execute.rs @@ -566,7 +566,8 @@ impl BasicBlockExecutor { pub fn new(strategy_factory: F, db: DB) -> Self { let db = BalDatabase::new( State::builder().with_database(db).with_bundle_update().without_state_clear().build(), - ); + ) + .with_bal_builder(); Self { strategy_factory, db } } diff --git a/crates/optimism/flashblocks/src/worker.rs b/crates/optimism/flashblocks/src/worker.rs index 39ca6fe5d7b..3e30b4a1248 100644 --- a/crates/optimism/flashblocks/src/worker.rs +++ b/crates/optimism/flashblocks/src/worker.rs @@ -96,7 +96,8 @@ where let cached_db = request_cache.as_db_mut(StateProviderDatabase::new(&state_provider)); let mut 
state = BalDatabase::new( State::builder().with_database(cached_db).with_bundle_update().build(), - ); + ) + .with_bal_builder(); let mut builder = self .evm_config diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index ad8152123b7..fad1d8676d9 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -342,7 +342,8 @@ impl OpBuilder<'_, Txs> { debug!(target: "payload_builder", id=%ctx.payload_id(), parent_header = ?ctx.parent().hash(), parent_number = ctx.parent().number(), "building new payload"); let mut db = - BalDatabase::new(State::builder().with_database(db).with_bundle_update().build()); + BalDatabase::new(State::builder().with_database(db).with_bundle_update().build()) + .with_bal_builder(); let mut builder = ctx.block_builder(&mut db)?; diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 8da63dd57be..7833505eda5 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -100,7 +100,8 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA //todo? 
let mut db = BalDatabase::new( State::builder().with_database(StateProviderDatabase::new(state)).build(), - ); + ) + .with_bal_builder(); let mut blocks: Vec>> = Vec::with_capacity(block_state_calls.len()); for block in block_state_calls { diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 97400657e0f..9c624c58057 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -240,7 +240,8 @@ pub trait LoadPendingBlock: .map_err(Self::Error::from_eth_err)?; let state = StateProviderDatabase::new(&state_provider); let mut db = - BalDatabase::new(State::builder().with_database(state).with_bundle_update().build()); + BalDatabase::new(State::builder().with_database(state).with_bundle_update().build()) + .with_bal_builder(); let mut builder = self .evm_config()