diff --git a/Cargo.lock b/Cargo.lock index 8073071a615..c8e4f1bd056 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -37,7 +37,7 @@ dependencies = [ "eth2_keystore", "eth2_wallet", "filesystem", - "rand 0.8.5", + "rand 0.9.2", "regex", "rpassword", "serde", @@ -228,7 +228,7 @@ dependencies = [ "paste", "proptest", "proptest-derive", - "rand 0.9.0", + "rand 0.9.2", "ruint", "rustc-hash 2.1.1", "serde", @@ -911,7 +911,7 @@ dependencies = [ "operation_pool", "parking_lot 0.12.3", "proto_array", - "rand 0.8.5", + "rand 0.9.2", "rayon", "safe_arith", "sensitive_url", @@ -1175,7 +1175,7 @@ dependencies = [ "ethereum_ssz", "fixed_bytes", "hex", - "rand 0.8.5", + "rand 0.9.2", "safe_arith", "serde", "tree_hash", @@ -1573,7 +1573,7 @@ dependencies = [ "monitoring_api", "network", "operation_pool", - "rand 0.8.5", + "rand 0.9.2", "sensitive_url", "serde", "serde_json", @@ -2834,7 +2834,7 @@ dependencies = [ "multiaddr", "pretty_reqwest_error", "proto_array", - "rand 0.8.5", + "rand 0.9.2", "reqwest 0.11.27", "reqwest-eventsource", "sensitive_url", @@ -2891,7 +2891,7 @@ dependencies = [ "hex", "hmac 0.11.0", "pbkdf2 0.8.0", - "rand 0.8.5", + "rand 0.9.2", "scrypt 0.7.0", "serde", "serde_json", @@ -2932,7 +2932,7 @@ dependencies = [ "eth2_key_derivation", "eth2_keystore", "hex", - "rand 0.8.5", + "rand 0.9.2", "serde", "serde_json", "serde_repr", @@ -3325,7 +3325,7 @@ dependencies = [ "metrics", "parking_lot 0.12.3", "pretty_reqwest_error", - "rand 0.8.5", + "rand 0.9.2", "reqwest 0.11.27", "sensitive_url", "serde", @@ -3873,7 +3873,7 @@ dependencies = [ "ff 0.13.1", "rand 0.8.5", "rand_core 0.6.4", - "rand_xorshift", + "rand_xorshift 0.3.0", "subtle", ] @@ -4113,7 +4113,7 @@ dependencies = [ "idna", "ipnet", "once_cell", - "rand 0.9.0", + "rand 0.9.2", "socket2", "thiserror 2.0.12", "tinyvec", @@ -4135,7 +4135,7 @@ dependencies = [ "moka", "once_cell", "parking_lot 0.12.3", - "rand 0.9.0", + "rand 0.9.2", "resolv-conf", "smallvec", "thiserror 2.0.12", @@ -4295,7 +4295,7 @@ 
dependencies = [ "operation_pool", "parking_lot 0.12.3", "proto_array", - "rand 0.8.5", + "rand 0.9.2", "safe_arith", "sensitive_url", "serde", @@ -4792,7 +4792,7 @@ dependencies = [ "lockfile", "metrics", "parking_lot 0.12.3", - "rand 0.8.5", + "rand 0.9.2", "reqwest 0.11.27", "serde", "serde_json", @@ -5693,7 +5693,7 @@ dependencies = [ "prometheus-client", "quickcheck", "quickcheck_macros", - "rand 0.8.5", + "rand 0.9.2", "regex", "serde", "sha2 0.9.9", @@ -6143,7 +6143,7 @@ dependencies = [ "hyper 1.6.0", "hyper-util", "log", - "rand 0.9.0", + "rand 0.9.2", "regex", "serde_json", "serde_urlencoded", @@ -6389,7 +6389,9 @@ dependencies = [ "operation_pool", "parking_lot 0.12.3", "rand 0.8.5", + "rand 0.9.2", "rand_chacha 0.3.1", + "rand_chacha 0.9.0", "serde_json", "slot_clock", "smallvec", @@ -6771,7 +6773,7 @@ dependencies = [ "futures-util", "opentelemetry", "percent-encoding", - "rand 0.9.0", + "rand 0.9.2", "serde_json", "thiserror 2.0.12", ] @@ -6789,7 +6791,7 @@ dependencies = [ "maplit", "metrics", "parking_lot 0.12.3", - "rand 0.8.5", + "rand 0.9.2", "rayon", "serde", "state_processing", @@ -7374,7 +7376,7 @@ dependencies = [ "num-traits", "rand 0.8.5", "rand_chacha 0.3.1", - "rand_xorshift", + "rand_xorshift 0.3.0", "regex-syntax 0.8.5", "rusty-fork", "tempfile", @@ -7619,14 +7621,13 @@ dependencies = [ [[package]] name = "rand" -version = "0.9.0" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ "rand_chacha 0.9.0", "rand_core 0.9.3", "serde", - "zerocopy 0.8.23", ] [[package]] @@ -7677,6 +7678,15 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "rand_xorshift" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a" 
+dependencies = [ + "rand_core 0.9.3", +] + [[package]] name = "rayon" version = "1.10.0" @@ -8023,7 +8033,7 @@ dependencies = [ "primitive-types 0.12.2", "proptest", "rand 0.8.5", - "rand 0.9.0", + "rand 0.9.2", "rlp", "ruint-macro", "serde", @@ -8794,7 +8804,7 @@ dependencies = [ "maplit", "metrics", "parking_lot 0.12.3", - "rand 0.8.5", + "rand 0.9.2", "rayon", "redb", "safe_arith", @@ -8960,7 +8970,7 @@ dependencies = [ "itertools 0.10.5", "merkle_proof", "metrics", - "rand 0.8.5", + "rand 0.9.2", "rayon", "safe_arith", "smallvec", @@ -9006,7 +9016,7 @@ dependencies = [ "lru", "metrics", "parking_lot 0.12.3", - "rand 0.8.5", + "rand 0.9.2", "redb", "safe_arith", "serde", @@ -9915,8 +9925,8 @@ dependencies = [ "milhouse", "parking_lot 0.12.3", "paste", - "rand 0.8.5", - "rand_xorshift", + "rand 0.9.2", + "rand_xorshift 0.4.0", "rayon", "regex", "rpds", @@ -10155,7 +10165,7 @@ dependencies = [ "filesystem", "hex", "lockfile", - "rand 0.8.5", + "rand 0.9.2", "tempfile", "tree_hash", "types", @@ -10185,7 +10195,7 @@ dependencies = [ "lighthouse_version", "logging", "parking_lot 0.12.3", - "rand 0.8.5", + "rand 0.9.2", "sensitive_url", "serde", "serde_json", diff --git a/Cargo.toml b/Cargo.toml index c2eb868b763..38edb126a03 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -94,7 +94,7 @@ members = [ resolver = "2" [workspace.package] -edition = "2021" +edition = "2024" [workspace.dependencies] account_utils = { path = "common/account_utils" } @@ -208,7 +208,7 @@ quickcheck = "1" quickcheck_macros = "1" quote = "1" r2d2 = "0.8" -rand = "0.8" +rand = "0.9.0" rayon = "1.7" regex = "1" reqwest = { version = "0.11", default-features = false, features = [ diff --git a/account_manager/src/validator/create.rs b/account_manager/src/validator/create.rs index 3db8c3f152d..427ca9fa130 100644 --- a/account_manager/src/validator/create.rs +++ b/account_manager/src/validator/create.rs @@ -1,15 +1,15 @@ use crate::common::read_wallet_name_from_cli; use crate::{SECRETS_DIR_FLAG, 
WALLETS_DIR_FLAG}; use account_utils::{ - random_password, read_password_from_user, strip_off_newlines, validator_definitions, PlainText, - STDIN_INPUTS_FLAG, + PlainText, STDIN_INPUTS_FLAG, random_password, read_password_from_user, strip_off_newlines, + validator_definitions, }; use clap::{Arg, ArgAction, ArgMatches, Command}; use clap_utils::FLAG_HEADER; -use directory::{parse_path_or_default_with_flag, DEFAULT_SECRET_DIR, DEFAULT_WALLET_DIR}; +use directory::{DEFAULT_SECRET_DIR, DEFAULT_WALLET_DIR, parse_path_or_default_with_flag}; use environment::Environment; use eth2_wallet_manager::WalletManager; -use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; +use slashing_protection::{SLASHING_PROTECTION_FILENAME, SlashingDatabase}; use std::ffi::OsStr; use std::fs; use std::fs::create_dir_all; @@ -148,7 +148,9 @@ pub fn cli_run( return Err(format!( "No wallet directory at {:?}. Use the `lighthouse --network {} {} {} {}` command to create a wallet", wallet_base_dir, - matches.get_one::("network").unwrap_or(&String::from("")), + matches + .get_one::("network") + .unwrap_or(&String::from("")), crate::CMD, crate::wallet::CMD, crate::wallet::create::CMD diff --git a/account_manager/src/validator/exit.rs b/account_manager/src/validator/exit.rs index 1393d0f1526..5ea77f284e2 100644 --- a/account_manager/src/validator/exit.rs +++ b/account_manager/src/validator/exit.rs @@ -4,8 +4,8 @@ use clap::{Arg, ArgAction, ArgMatches, Command}; use clap_utils::FLAG_HEADER; use environment::Environment; use eth2::{ - types::{GenesisData, StateId, ValidatorData, ValidatorId, ValidatorStatus}, BeaconNodeHttpClient, Timeouts, + types::{GenesisData, StateId, ValidatorData, ValidatorId, ValidatorStatus}, }; use eth2_keystore::Keystore; use eth2_network_config::Eth2NetworkConfig; @@ -239,9 +239,11 @@ async fn publish_voluntary_exit( let withdrawal_epoch = validator_data.validator.withdrawable_epoch; let current_epoch = get_current_epoch::(genesis_data.genesis_time, spec) 
.ok_or("Failed to get current epoch. Please check your system time")?; - eprintln!("Voluntary exit has been accepted into the beacon chain, but not yet finalized. \ + eprintln!( + "Voluntary exit has been accepted into the beacon chain, but not yet finalized. \ Finalization may take several minutes or longer. Before finalization there is a low \ - probability that the exit may be reverted."); + probability that the exit may be reverted." + ); eprintln!( "Current epoch: {}, Exit epoch: {}, Withdrawable epoch: {}", current_epoch, exit_epoch, withdrawal_epoch @@ -401,7 +403,7 @@ mod tests { use eth2_keystore::KeystoreBuilder; use std::fs::File; use std::io::Write; - use tempfile::{tempdir, TempDir}; + use tempfile::{TempDir, tempdir}; const PASSWORD: &str = "cats"; const KEYSTORE_NAME: &str = "keystore-m_12381_3600_0_0_0-1595406747.json"; diff --git a/account_manager/src/validator/import.rs b/account_manager/src/validator/import.rs index b985484d11b..6afdd81b71e 100644 --- a/account_manager/src/validator/import.rs +++ b/account_manager/src/validator/import.rs @@ -1,17 +1,17 @@ use crate::wallet::create::PASSWORD_FLAG; use account_utils::validator_definitions::SigningDefinition; use account_utils::{ + STDIN_INPUTS_FLAG, eth2_keystore::Keystore, read_password_from_user, validator_definitions::{ - recursively_find_voting_keystores, PasswordStorage, ValidatorDefinition, - ValidatorDefinitions, CONFIG_FILENAME, + CONFIG_FILENAME, PasswordStorage, ValidatorDefinition, ValidatorDefinitions, + recursively_find_voting_keystores, }, - STDIN_INPUTS_FLAG, }; use clap::{Arg, ArgAction, ArgMatches, Command}; use clap_utils::FLAG_HEADER; -use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; +use slashing_protection::{SLASHING_PROTECTION_FILENAME, SlashingDatabase}; use std::fs; use std::path::PathBuf; use std::thread::sleep; @@ -133,7 +133,7 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin return Err(format!( "Must supply 
either --{} or --{}", KEYSTORE_FLAG, DIR_FLAG - )) + )); } }; @@ -227,19 +227,20 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin if let Some(ValidatorDefinition { signing_definition: SigningDefinition::LocalKeystore { - voting_keystore_password: ref mut old_passwd, + voting_keystore_password: old_passwd, .. }, .. }) = old_validator_def_opt + && old_passwd.is_none() + && password_opt.is_some() { - if old_passwd.is_none() && password_opt.is_some() { - *old_passwd = password_opt; - defs.save(&validator_dir) - .map_err(|e| format!("Unable to save {}: {:?}", CONFIG_FILENAME, e))?; - eprintln!("Password updated for public key {}", voting_pubkey); - } + *old_passwd = password_opt; + defs.save(&validator_dir) + .map_err(|e| format!("Unable to save {}: {:?}", CONFIG_FILENAME, e))?; + eprintln!("Password updated for public key {}", voting_pubkey); } + eprintln!( "Skipping import of keystore for existing public key: {:?}", src_keystore diff --git a/account_manager/src/validator/mod.rs b/account_manager/src/validator/mod.rs index b699301cde3..5a6c9439a66 100644 --- a/account_manager/src/validator/mod.rs +++ b/account_manager/src/validator/mod.rs @@ -8,7 +8,7 @@ pub mod slashing_protection; use crate::{VALIDATOR_DIR_FLAG, VALIDATOR_DIR_FLAG_ALIAS}; use clap::{Arg, ArgAction, ArgMatches, Command}; -use directory::{parse_path_or_default_with_flag, DEFAULT_VALIDATOR_DIR}; +use directory::{DEFAULT_VALIDATOR_DIR, parse_path_or_default_with_flag}; use environment::Environment; use std::path::PathBuf; use types::EthSpec; diff --git a/account_manager/src/validator/modify.rs b/account_manager/src/validator/modify.rs index 571cd28bf5e..36f6b53d852 100644 --- a/account_manager/src/validator/modify.rs +++ b/account_manager/src/validator/modify.rs @@ -69,7 +69,7 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin return Err(format!( "{} does not have a {} command. 
See --help", CMD, unknown - )) + )); } _ => return Err(format!("No command provided for {}. See --help", CMD)), }; diff --git a/account_manager/src/validator/recover.rs b/account_manager/src/validator/recover.rs index 19d161a468f..a61d19d7b69 100644 --- a/account_manager/src/validator/recover.rs +++ b/account_manager/src/validator/recover.rs @@ -1,13 +1,13 @@ use super::create::STORE_WITHDRAW_FLAG; -use crate::validator::create::COUNT_FLAG; use crate::SECRETS_DIR_FLAG; -use account_utils::eth2_keystore::{keypair_from_secret, Keystore, KeystoreBuilder}; -use account_utils::{random_password, read_mnemonic_from_cli, STDIN_INPUTS_FLAG}; +use crate::validator::create::COUNT_FLAG; +use account_utils::eth2_keystore::{Keystore, KeystoreBuilder, keypair_from_secret}; +use account_utils::{STDIN_INPUTS_FLAG, random_password, read_mnemonic_from_cli}; use clap::{Arg, ArgAction, ArgMatches, Command}; use clap_utils::FLAG_HEADER; -use directory::{parse_path_or_default_with_flag, DEFAULT_SECRET_DIR}; +use directory::{DEFAULT_SECRET_DIR, parse_path_or_default_with_flag}; use eth2_wallet::bip39::Seed; -use eth2_wallet::{recover_validator_secret_from_mnemonic, KeyType, ValidatorKeystores}; +use eth2_wallet::{KeyType, ValidatorKeystores, recover_validator_secret_from_mnemonic}; use std::fs::create_dir_all; use std::path::PathBuf; use validator_dir::Builder as ValidatorDirBuilder; @@ -97,7 +97,9 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin .map_err(|e| format!("Could not create secrets dir at {secrets_dir:?}: {e:?}"))?; eprintln!(); - eprintln!("WARNING: KEY RECOVERY CAN LEAD TO DUPLICATING VALIDATORS KEYS, WHICH CAN LEAD TO SLASHING."); + eprintln!( + "WARNING: KEY RECOVERY CAN LEAD TO DUPLICATING VALIDATORS KEYS, WHICH CAN LEAD TO SLASHING." 
+ ); eprintln!(); let mnemonic = read_mnemonic_from_cli(mnemonic_path, stdin_inputs)?; diff --git a/account_manager/src/validator/slashing_protection.rs b/account_manager/src/validator/slashing_protection.rs index 57d532d0ae2..18064b990f3 100644 --- a/account_manager/src/validator/slashing_protection.rs +++ b/account_manager/src/validator/slashing_protection.rs @@ -1,8 +1,8 @@ use clap::{Arg, ArgAction, ArgMatches, Command}; use environment::Environment; use slashing_protection::{ - interchange::Interchange, InterchangeError, InterchangeImportOutcome, SlashingDatabase, - SLASHING_PROTECTION_FILENAME, + InterchangeError, InterchangeImportOutcome, SLASHING_PROTECTION_FILENAME, SlashingDatabase, + interchange::Interchange, }; use std::fs::File; use std::path::PathBuf; diff --git a/account_manager/src/wallet/create.rs b/account_manager/src/wallet/create.rs index 6369646929a..052e4bf2178 100644 --- a/account_manager/src/wallet/create.rs +++ b/account_manager/src/wallet/create.rs @@ -1,13 +1,13 @@ -use crate::common::read_wallet_name_from_cli; use crate::WALLETS_DIR_FLAG; +use crate::common::read_wallet_name_from_cli; use account_utils::{ - is_password_sufficiently_complex, random_password, read_password_from_user, strip_off_newlines, - STDIN_INPUTS_FLAG, + STDIN_INPUTS_FLAG, is_password_sufficiently_complex, random_password, read_password_from_user, + strip_off_newlines, }; use clap::{Arg, ArgAction, ArgMatches, Command}; use eth2_wallet::{ - bip39::{Language, Mnemonic, MnemonicType}, PlainText, + bip39::{Language, Mnemonic, MnemonicType}, }; use eth2_wallet_manager::{LockedWallet, WalletManager, WalletType}; use filesystem::create_with_600_perms; diff --git a/account_manager/src/wallet/mod.rs b/account_manager/src/wallet/mod.rs index f6f3bb0419a..5f8d3948a09 100644 --- a/account_manager/src/wallet/mod.rs +++ b/account_manager/src/wallet/mod.rs @@ -4,7 +4,7 @@ pub mod recover; use crate::WALLETS_DIR_FLAG; use clap::{Arg, ArgAction, ArgMatches, Command}; -use 
directory::{parse_path_or_default_with_flag, DEFAULT_WALLET_DIR}; +use directory::{DEFAULT_WALLET_DIR, parse_path_or_default_with_flag}; use std::fs::create_dir_all; use std::path::PathBuf; diff --git a/account_manager/src/wallet/recover.rs b/account_manager/src/wallet/recover.rs index 766d5dbe0cb..6d3b635090c 100644 --- a/account_manager/src/wallet/recover.rs +++ b/account_manager/src/wallet/recover.rs @@ -1,6 +1,6 @@ use crate::wallet::create::create_wallet_from_mnemonic; use crate::wallet::create::{HD_TYPE, NAME_FLAG, PASSWORD_FLAG, TYPE_FLAG}; -use account_utils::{read_mnemonic_from_cli, STDIN_INPUTS_FLAG}; +use account_utils::{STDIN_INPUTS_FLAG, read_mnemonic_from_cli}; use clap::{Arg, ArgAction, ArgMatches, Command}; use std::path::PathBuf; @@ -63,7 +63,9 @@ pub fn cli_run(matches: &ArgMatches, wallet_base_dir: PathBuf) -> Result<(), Str let stdin_inputs = cfg!(windows) || matches.get_flag(STDIN_INPUTS_FLAG); eprintln!(); - eprintln!("WARNING: KEY RECOVERY CAN LEAD TO DUPLICATING VALIDATORS KEYS, WHICH CAN LEAD TO SLASHING."); + eprintln!( + "WARNING: KEY RECOVERY CAN LEAD TO DUPLICATING VALIDATORS KEYS, WHICH CAN LEAD TO SLASHING." 
+ ); eprintln!(); let mnemonic = read_mnemonic_from_cli(mnemonic_path, stdin_inputs)?; diff --git a/beacon_node/beacon_chain/benches/benches.rs b/beacon_node/beacon_chain/benches/benches.rs index aae627da13c..c005ebb91a0 100644 --- a/beacon_node/beacon_chain/benches/benches.rs +++ b/beacon_node/beacon_chain/benches/benches.rs @@ -2,13 +2,13 @@ use std::sync::Arc; use beacon_chain::kzg_utils::{blobs_to_data_column_sidecars, reconstruct_data_columns}; use beacon_chain::test_utils::get_kzg; -use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use criterion::{Criterion, black_box, criterion_group, criterion_main}; use bls::Signature; use kzg::{KzgCommitment, KzgProof}; use types::{ - beacon_block_body::KzgCommitments, BeaconBlock, BeaconBlockDeneb, Blob, BlobsList, ChainSpec, - EmptyBlock, EthSpec, KzgProofs, MainnetEthSpec, SignedBeaconBlock, + BeaconBlock, BeaconBlockDeneb, Blob, BlobsList, ChainSpec, EmptyBlock, EthSpec, KzgProofs, + MainnetEthSpec, SignedBeaconBlock, beacon_block_body::KzgCommitments, }; fn create_test_block_and_blobs( diff --git a/beacon_node/beacon_chain/src/attestation_rewards.rs b/beacon_node/beacon_chain/src/attestation_rewards.rs index 7d88268cf9f..554cd431b3d 100644 --- a/beacon_node/beacon_chain/src/attestation_rewards.rs +++ b/beacon_node/beacon_chain/src/attestation_rewards.rs @@ -9,13 +9,13 @@ use state_processing::per_epoch_processing::altair::{ process_inactivity_updates_slow, process_justification_and_finalization, }; use state_processing::per_epoch_processing::base::rewards_and_penalties::{ - get_attestation_component_delta, get_attestation_deltas_all, get_attestation_deltas_subset, - get_inactivity_penalty_delta, get_inclusion_delay_delta, ProposerRewardCalculation, + ProposerRewardCalculation, get_attestation_component_delta, get_attestation_deltas_all, + get_attestation_deltas_subset, get_inactivity_penalty_delta, get_inclusion_delay_delta, }; use 
state_processing::per_epoch_processing::base::validator_statuses::InclusionInfo; use state_processing::per_epoch_processing::base::{ - process_justification_and_finalization as process_justification_and_finalization_base, TotalBalances, ValidatorStatus, ValidatorStatuses, + process_justification_and_finalization as process_justification_and_finalization_base, }; use state_processing::{ common::altair::BaseRewardPerIncrement, diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index f057c0619d5..470664d4429 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -35,11 +35,10 @@ mod batch; use crate::{ - metrics, + BeaconChain, BeaconChainError, BeaconChainTypes, metrics, observed_aggregates::{ObserveOutcome, ObservedAttestationKey}, observed_attesters::Error as ObservedAttestersError, single_attestation::single_attestation_to_attestation, - BeaconChain, BeaconChainError, BeaconChainTypes, }; use bls::verify_signature_sets; use itertools::Itertools; @@ -414,11 +413,12 @@ fn process_slash_info( if let Some(slasher) = chain.slasher.as_ref() { let (indexed_attestation, check_signature, err) = match slash_info { SignatureNotChecked(attestation, err) => { - if let Error::UnknownHeadBlock { .. } = err { - if attestation.data().beacon_block_root == attestation.data().target.root { - return err; - } + if let Error::UnknownHeadBlock { .. } = err + && attestation.data().beacon_block_root == attestation.data().target.root + { + return err; } + match obtain_indexed_attestation_and_committees_per_slot(chain, attestation) { Ok((indexed, _)) => (indexed, true, err), Err(e) => { @@ -432,10 +432,10 @@ fn process_slash_info( } } SignatureNotCheckedSingle(attestation, err) => { - if let Error::UnknownHeadBlock { .. 
} = err { - if attestation.data.beacon_block_root == attestation.data.target.root { - return err; - } + if let Error::UnknownHeadBlock { .. } = err + && attestation.data.beacon_block_root == attestation.data.target.root + { + return err; } let fork_name = chain @@ -450,14 +450,13 @@ fn process_slash_info( SignatureValid(indexed, err) => (indexed, false, err), }; - if check_signature { - if let Err(e) = verify_attestation_signature(chain, &indexed_attestation) { - debug!( - error = ?e, - "Signature verification for slasher failed" - ); - return err; - } + if check_signature && let Err(e) = verify_attestation_signature(chain, &indexed_attestation) + { + debug!( + error = ?e, + "Signature verification for slasher failed" + ); + return err; } // Supply to slasher. @@ -601,7 +600,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { return Err(SignatureNotChecked( signed_aggregate.message().aggregate(), e, - )) + )); } }; @@ -677,7 +676,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { return Err(SignatureNotChecked( signed_aggregate.message().aggregate(), e, - )) + )); } }; Ok(IndexedAggregatedAttestation { @@ -1001,13 +1000,13 @@ impl<'a, T: BeaconChainTypes> VerifiedUnaggregatedAttestation<'a, T> { .map_err(BeaconChainError::from)?; // If a subnet was specified, ensure that subnet is correct. - if let Some(subnet_id) = subnet_id { - if subnet_id != expected_subnet_id { - return Err(Error::InvalidSubnetId { - received: subnet_id, - expected: expected_subnet_id, - }); - } + if let Some(subnet_id) = subnet_id + && subnet_id != expected_subnet_id + { + return Err(Error::InvalidSubnetId { + received: subnet_id, + expected: expected_subnet_id, + }); }; // Now that the attestation has been fully verified, store that we have received a valid // attestation from this validator. @@ -1150,13 +1149,13 @@ fn verify_head_block_is_known( if let Some(block) = block_opt { // Reject any block that exceeds our limit on skipped slots. 
- if let Some(max_skip_slots) = max_skip_slots { - if attestation_data.slot > block.slot + max_skip_slots { - return Err(Error::TooManySkippedSlots { - head_block_slot: block.slot, - attestation_slot: attestation_data.slot, - }); - } + if let Some(max_skip_slots) = max_skip_slots + && attestation_data.slot > block.slot + max_skip_slots + { + return Err(Error::TooManySkippedSlots { + head_block_slot: block.slot, + attestation_slot: attestation_data.slot, + }); } if !verify_attestation_is_finalized_checkpoint_or_descendant(attestation_data, chain) { diff --git a/beacon_node/beacon_chain/src/attestation_verification/batch.rs b/beacon_node/beacon_chain/src/attestation_verification/batch.rs index 266279432ef..c1087ef77ef 100644 --- a/beacon_node/beacon_chain/src/attestation_verification/batch.rs +++ b/beacon_node/beacon_chain/src/attestation_verification/batch.rs @@ -13,7 +13,7 @@ use super::{ CheckAttestationSignature, Error, IndexedAggregatedAttestation, IndexedUnaggregatedAttestation, VerifiedAggregatedAttestation, VerifiedUnaggregatedAttestation, }; -use crate::{metrics, BeaconChain, BeaconChainError, BeaconChainTypes}; +use crate::{BeaconChain, BeaconChainError, BeaconChainTypes, metrics}; use bls::verify_signature_sets; use state_processing::signature_sets::{ indexed_attestation_signature_set_from_pubkeys, signed_aggregate_selection_proof_signature_set, diff --git a/beacon_node/beacon_chain/src/attester_cache.rs b/beacon_node/beacon_chain/src/attester_cache.rs index 34a528f212f..f879adfb498 100644 --- a/beacon_node/beacon_chain/src/attester_cache.rs +++ b/beacon_node/beacon_chain/src/attester_cache.rs @@ -11,16 +11,16 @@ use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; use parking_lot::RwLock; -use state_processing::state_advance::{partial_state_advance, Error as StateAdvanceError}; +use state_processing::state_advance::{Error as StateAdvanceError, partial_state_advance}; use std::collections::HashMap; use std::ops::Range; use types::{ + BeaconState, 
BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, FixedBytesExtended, + Hash256, RelativeEpoch, Slot, attestation::Error as AttestationError, beacon_state::{ compute_committee_index_in_epoch, compute_committee_range_in_epoch, epoch_committee_count, }, - BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, FixedBytesExtended, - Hash256, RelativeEpoch, Slot, }; type JustifiedCheckpoint = Checkpoint; diff --git a/beacon_node/beacon_chain/src/beacon_block_reward.rs b/beacon_node/beacon_chain/src/beacon_block_reward.rs index ecaa4f45e74..ac4ed2ab678 100644 --- a/beacon_node/beacon_chain/src/beacon_block_reward.rs +++ b/beacon_node/beacon_chain/src/beacon_block_reward.rs @@ -15,8 +15,8 @@ use state_processing::{ }; use std::collections::HashSet; use store::{ - consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}, RelativeEpoch, + consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}, }; use tracing::error; use types::{AbstractExecPayload, BeaconBlockRef, BeaconState, BeaconStateError, EthSpec}; diff --git a/beacon_node/beacon_chain/src/beacon_block_streamer.rs b/beacon_node/beacon_chain/src/beacon_block_streamer.rs index e37a69040db..3e52dc7a7d9 100644 --- a/beacon_node/beacon_chain/src/beacon_block_streamer.rs +++ b/beacon_node/beacon_chain/src/beacon_block_streamer.rs @@ -1,14 +1,14 @@ -use crate::{metrics, BeaconChain, BeaconChainError, BeaconChainTypes, BlockProcessStatus}; +use crate::{BeaconChain, BeaconChainError, BeaconChainTypes, BlockProcessStatus, metrics}; use execution_layer::{ExecutionLayer, ExecutionPayloadBodyV1}; use logging::crit; use std::collections::HashMap; use std::sync::Arc; use store::{DatabaseBlock, ExecutionPayloadDeneb}; use tokio::sync::{ - mpsc::{self, UnboundedSender}, RwLock, + mpsc::{self, UnboundedSender}, }; -use tokio_stream::{wrappers::UnboundedReceiverStream, Stream}; +use tokio_stream::{Stream, wrappers::UnboundedReceiverStream}; use tracing::{debug, 
error}; use types::{ ChainSpec, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, Hash256, SignedBeaconBlock, @@ -106,7 +106,7 @@ fn reconstruct_default_header_block( "Block with fork variant {} has execution payload", fork )) - .into()) + .into()); } }; @@ -683,7 +683,7 @@ impl From for BeaconChainError { #[cfg(test)] mod tests { use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckCaches}; - use crate::test_utils::{test_spec, BeaconChainHarness, EphemeralHarnessType}; + use crate::test_utils::{BeaconChainHarness, EphemeralHarnessType, test_spec}; use execution_layer::test_utils::Block; use std::sync::Arc; use std::sync::LazyLock; diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 9072e7f972f..26ef750c476 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1,19 +1,19 @@ use crate::attestation_verification::{ - batch_verify_aggregated_attestations, batch_verify_unaggregated_attestations, Error as AttestationError, VerifiedAggregatedAttestation, VerifiedAttestation, - VerifiedUnaggregatedAttestation, + VerifiedUnaggregatedAttestation, batch_verify_aggregated_attestations, + batch_verify_unaggregated_attestations, }; use crate::attester_cache::{AttesterCache, AttesterCacheKey}; use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckCaches}; -use crate::beacon_proposer_cache::compute_proposer_duties_from_head; use crate::beacon_proposer_cache::BeaconProposerCache; +use crate::beacon_proposer_cache::compute_proposer_duties_from_head; use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use crate::block_times_cache::BlockTimesCache; use crate::block_verification::POS_PANDA_BANNER; use crate::block_verification::{ + BlockError, ExecutionPendingBlock, GossipVerifiedBlock, IntoExecutionPendingBlock, check_block_is_finalized_checkpoint_or_descendant, check_block_relevancy, - signature_verify_chain_segment, 
verify_header_signature, BlockError, ExecutionPendingBlock, - GossipVerifiedBlock, IntoExecutionPendingBlock, + signature_verify_chain_segment, verify_header_signature, }; use crate::block_verification_types::{ AsBlock, AvailableExecutedBlock, BlockImportData, ExecutedBlock, RpcBlock, @@ -28,7 +28,7 @@ use crate::data_column_verification::{GossipDataColumnError, GossipVerifiedDataC use crate::early_attester_cache::EarlyAttesterCache; use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::events::ServerSentEventHandler; -use crate::execution_payload::{get_execution_payload, NotifyExecutionLayer, PreparePayloadHandle}; +use crate::execution_payload::{NotifyExecutionLayer, PreparePayloadHandle, get_execution_payload}; use crate::fetch_blobs::EngineGetBlobsOutput; use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult}; use crate::graffiti_calculator::GraffitiCalculator; @@ -65,13 +65,13 @@ use crate::sync_committee_verification::{ }; use crate::validator_custody::CustodyContextSsz; use crate::validator_monitor::{ - get_slot_delay_ms, timestamp_now, ValidatorMonitor, - HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS, + HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS, ValidatorMonitor, get_slot_delay_ms, + timestamp_now, }; use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::{ - metrics, AvailabilityPendingExecutedBlock, BeaconChainError, BeaconForkChoiceStore, - BeaconSnapshot, CachedHead, + AvailabilityPendingExecutedBlock, BeaconChainError, BeaconForkChoiceStore, BeaconSnapshot, + CachedHead, metrics, }; use eth2::types::{ EventKind, SseBlobSidecar, SseBlock, SseDataColumnSidecar, SseExtendedPayloadAttributes, @@ -85,8 +85,8 @@ use fork_choice::{ InvalidationOperation, PayloadVerificationStatus, ResetPayloadStatuses, }; use futures::channel::mpsc::Sender; -use itertools::process_results; use itertools::Itertools; +use itertools::process_results; use kzg::Kzg; use logging::crit; 
use operation_pool::{ @@ -100,16 +100,16 @@ use slasher::Slasher; use slot_clock::SlotClock; use ssz::Encode; use state_processing::{ + BlockSignatureStrategy, ConsensusContext, SigVerifiedOp, VerifyBlockRoot, VerifyOperation, common::get_attesting_indices_from_state, epoch_cache::initialize_epoch_cache, per_block_processing, per_block_processing::{ - errors::AttestationValidationError, get_expected_withdrawals, - verify_attestation_for_block_inclusion, VerifySignatures, + VerifySignatures, errors::AttestationValidationError, get_expected_withdrawals, + verify_attestation_for_block_inclusion, }, per_slot_processing, state_advance::{complete_state_advance, partial_state_advance}, - BlockSignatureStrategy, ConsensusContext, SigVerifiedOp, VerifyBlockRoot, VerifyOperation, }; use std::borrow::Cow; use std::cmp::Ordering; @@ -126,7 +126,7 @@ use store::{ }; use task_executor::{ShutdownReason, TaskExecutor}; use tokio_stream::Stream; -use tracing::{debug, debug_span, error, info, info_span, instrument, trace, warn, Span}; +use tracing::{Span, debug, debug_span, error, info, info_span, instrument, trace, warn}; use tree_hash::TreeHash; use types::blob_sidecar::FixedBlobSidecarList; use types::data_column_sidecar::ColumnIndex; @@ -978,10 +978,10 @@ impl BeaconChain { Ordering::Greater => state.get_block_root(request_slot).ok().copied(), }; - if let Some(request_root) = request_root_opt { - if let Ok(prev_root) = state.get_block_root(prev_slot) { - return Ok(Some((*prev_root != request_root).then_some(request_root))); - } + if let Some(request_root) = request_root_opt + && let Ok(prev_root) = state.get_block_root(prev_slot) + { + return Ok(Some((*prev_root != request_root).then_some(request_root))); } // Fast lookup is not possible. 
@@ -1993,7 +1993,7 @@ impl BeaconChain { return Err(Error::HeadBlockNotFullyVerified { beacon_block_root, execution_status, - }) + }); } None => return Err(Error::HeadMissingFromForkChoice(beacon_block_root)), }; @@ -2141,12 +2141,12 @@ impl BeaconChain { VerifiedAggregatedAttestation::verify(signed_aggregate, self).inspect(|v| { // This method is called for API and gossip attestations, so this covers all aggregated attestation events - if let Some(event_handler) = self.event_handler.as_ref() { - if event_handler.has_attestation_subscribers() { - event_handler.register(EventKind::Attestation(Box::new( - v.attestation().clone_as_attestation(), - ))); - } + if let Some(event_handler) = self.event_handler.as_ref() + && event_handler.has_attestation_subscribers() + { + event_handler.register(EventKind::Attestation(Box::new( + v.attestation().clone_as_attestation(), + ))); } metrics::inc_counter(&metrics::AGGREGATED_ATTESTATION_PROCESSING_SUCCESSES); }) @@ -2176,12 +2176,12 @@ impl BeaconChain { metrics::inc_counter(&metrics::SYNC_CONTRIBUTION_PROCESSING_REQUESTS); let _timer = metrics::start_timer(&metrics::SYNC_CONTRIBUTION_GOSSIP_VERIFICATION_TIMES); VerifiedSyncContribution::verify(sync_contribution, self).inspect(|v| { - if let Some(event_handler) = self.event_handler.as_ref() { - if event_handler.has_contribution_subscribers() { - event_handler.register(EventKind::ContributionAndProof(Box::new( - v.aggregate().clone(), - ))); - } + if let Some(event_handler) = self.event_handler.as_ref() + && event_handler.has_contribution_subscribers() + { + event_handler.register(EventKind::ContributionAndProof(Box::new( + v.aggregate().clone(), + ))); } metrics::inc_counter(&metrics::SYNC_CONTRIBUTION_PROCESSING_SUCCESSES); }) @@ -2533,12 +2533,11 @@ impl BeaconChain { .verify_and_observe_at(exit, wall_clock_epoch, head_state, &self.spec) .inspect(|exit| { // this method is called for both API and gossip exits, so this covers all exit events - if let Some(event_handler) = 
self.event_handler.as_ref() { - if event_handler.has_exit_subscribers() { - if let ObservationOutcome::New(exit) = exit.clone() { - event_handler.register(EventKind::VoluntaryExit(exit.into_inner())); - } - } + if let Some(event_handler) = self.event_handler.as_ref() + && event_handler.has_exit_subscribers() + && let ObservationOutcome::New(exit) = exit.clone() + { + event_handler.register(EventKind::VoluntaryExit(exit.into_inner())); } })?) } @@ -2567,12 +2566,12 @@ impl BeaconChain { &self, proposer_slashing: SigVerifiedOp, ) { - if let Some(event_handler) = self.event_handler.as_ref() { - if event_handler.has_proposer_slashing_subscribers() { - event_handler.register(EventKind::ProposerSlashing(Box::new( - proposer_slashing.clone().into_inner(), - ))); - } + if let Some(event_handler) = self.event_handler.as_ref() + && event_handler.has_proposer_slashing_subscribers() + { + event_handler.register(EventKind::ProposerSlashing(Box::new( + proposer_slashing.clone().into_inner(), + ))); } self.op_pool.insert_proposer_slashing(proposer_slashing) @@ -2605,12 +2604,12 @@ impl BeaconChain { .fork_choice_write_lock() .on_attester_slashing(attester_slashing.as_inner().to_ref()); - if let Some(event_handler) = self.event_handler.as_ref() { - if event_handler.has_attester_slashing_subscribers() { - event_handler.register(EventKind::AttesterSlashing(Box::new( - attester_slashing.clone().into_inner(), - ))); - } + if let Some(event_handler) = self.event_handler.as_ref() + && event_handler.has_attester_slashing_subscribers() + { + event_handler.register(EventKind::AttesterSlashing(Box::new( + attester_slashing.clone().into_inner(), + ))); } // Add to the op pool (if we have the ability to propose blocks). 
@@ -2678,12 +2677,12 @@ impl BeaconChain { bls_to_execution_change: SigVerifiedOp, received_pre_capella: ReceivedPreCapella, ) -> bool { - if let Some(event_handler) = self.event_handler.as_ref() { - if event_handler.has_bls_to_execution_change_subscribers() { - event_handler.register(EventKind::BlsToExecutionChange(Box::new( - bls_to_execution_change.clone().into_inner(), - ))); - } + if let Some(event_handler) = self.event_handler.as_ref() + && event_handler.has_bls_to_execution_change_subscribers() + { + event_handler.register(EventKind::BlsToExecutionChange(Box::new( + bls_to_execution_change.clone().into_inner(), + ))); } self.op_pool @@ -2868,7 +2867,7 @@ impl BeaconChain { return ChainSegmentResult::Failed { imported_blocks, error: BlockError::BeaconChainError(error.into()), - } + }; } }; @@ -3125,14 +3124,12 @@ impl BeaconChain { .iter() .filter_map(|b| b.as_ref().map(|b| b.block_parent_root())) .next() - { - if !self + && !self .canonical_head .fork_choice_read_lock() .contains_block(&parent_root) - { - return Err(BlockError::ParentUnknown { parent_root }); - } + { + return Err(BlockError::ParentUnknown { parent_root }); } self.emit_sse_blob_sidecar_events(&block_root, blobs.iter().flatten().map(Arc::as_ref)); @@ -3182,19 +3179,19 @@ impl BeaconChain { where I: Iterator>, { - if let Some(event_handler) = self.event_handler.as_ref() { - if event_handler.has_blob_sidecar_subscribers() { - let imported_blobs = self - .data_availability_checker - .cached_blob_indexes(block_root) - .unwrap_or_default(); - let new_blobs = blobs_iter.filter(|b| !imported_blobs.contains(&b.index)); - - for blob in new_blobs { - event_handler.register(EventKind::BlobSidecar( - SseBlobSidecar::from_blob_sidecar(blob), - )); - } + if let Some(event_handler) = self.event_handler.as_ref() + && event_handler.has_blob_sidecar_subscribers() + { + let imported_blobs = self + .data_availability_checker + .cached_blob_indexes(block_root) + .unwrap_or_default(); + let new_blobs = 
blobs_iter.filter(|b| !imported_blobs.contains(&b.index)); + + for blob in new_blobs { + event_handler.register(EventKind::BlobSidecar(SseBlobSidecar::from_blob_sidecar( + blob, + ))); } } } @@ -3206,20 +3203,20 @@ impl BeaconChain { ) where I: Iterator>, { - if let Some(event_handler) = self.event_handler.as_ref() { - if event_handler.has_data_column_sidecar_subscribers() { - let imported_data_columns = self - .data_availability_checker - .cached_data_column_indexes(block_root) - .unwrap_or_default(); - let new_data_columns = - data_columns_iter.filter(|b| !imported_data_columns.contains(&b.index)); - - for data_column in new_data_columns { - event_handler.register(EventKind::DataColumnSidecar( - SseDataColumnSidecar::from_data_column_sidecar(data_column), - )); - } + if let Some(event_handler) = self.event_handler.as_ref() + && event_handler.has_data_column_sidecar_subscribers() + { + let imported_data_columns = self + .data_availability_checker + .cached_data_column_indexes(block_root) + .unwrap_or_default(); + let new_data_columns = + data_columns_iter.filter(|b| !imported_data_columns.contains(&b.index)); + + for data_column in new_data_columns { + event_handler.register(EventKind::DataColumnSidecar( + SseDataColumnSidecar::from_data_column_sidecar(data_column), + )); } } } @@ -3254,14 +3251,13 @@ impl BeaconChain { // Reject RPC columns referencing unknown parents. Otherwise we allow potentially invalid data // into the da_checker, where invalid = descendant of invalid blocks. // Note: custody_columns should have at least one item and all items have the same parent root. 
- if let Some(parent_root) = custody_columns.iter().map(|c| c.block_parent_root()).next() { - if !self + if let Some(parent_root) = custody_columns.iter().map(|c| c.block_parent_root()).next() + && !self .canonical_head .fork_choice_read_lock() .contains_block(&parent_root) - { - return Err(BlockError::ParentUnknown { parent_root }); - } + { + return Err(BlockError::ParentUnknown { parent_root }); } self.emit_sse_data_column_sidecar_events( @@ -4155,37 +4151,36 @@ impl BeaconChain { // This ensures we only perform the check once. if current_head_finalized_checkpoint.epoch < wss_checkpoint.epoch && wss_checkpoint.epoch <= new_finalized_checkpoint.epoch - { - if let Err(e) = + && let Err(e) = self.verify_weak_subjectivity_checkpoint(wss_checkpoint, block_root, state) - { - let mut shutdown_sender = self.shutdown_sender(); - crit!( - ?block_root, - parent_root = ?block.parent_root(), - old_finalized_epoch = ?current_head_finalized_checkpoint.epoch, - new_finalized_epoch = ?new_finalized_checkpoint.epoch, - weak_subjectivity_epoch = ?wss_checkpoint.epoch, - error = ?e, - "Weak subjectivity checkpoint verification failed while importing block!" - ); - crit!( - "You must use the `--purge-db` flag to clear the database and restart sync. \ + { + let mut shutdown_sender = self.shutdown_sender(); + crit!( + ?block_root, + parent_root = ?block.parent_root(), + old_finalized_epoch = ?current_head_finalized_checkpoint.epoch, + new_finalized_epoch = ?new_finalized_checkpoint.epoch, + weak_subjectivity_epoch = ?wss_checkpoint.epoch, + error = ?e, + "Weak subjectivity checkpoint verification failed while importing block!" + ); + crit!( + "You must use the `--purge-db` flag to clear the database and restart sync. \ You may be on a hostile network." - ); - shutdown_sender - .try_send(ShutdownReason::Failure( - "Weak subjectivity checkpoint verification failed. \ + ); + shutdown_sender + .try_send(ShutdownReason::Failure( + "Weak subjectivity checkpoint verification failed. 
\ Provided block root is not a checkpoint.", + )) + .map_err(|err| { + BlockError::BeaconChainError(Box::new( + BeaconChainError::WeakSubjectivtyShutdownError(err), )) - .map_err(|err| { - BlockError::BeaconChainError(Box::new( - BeaconChainError::WeakSubjectivtyShutdownError(err), - )) - })?; - return Err(BlockError::WeakSubjectivityConflict); - } + })?; + return Err(BlockError::WeakSubjectivityConflict); } + Ok(()) } @@ -4413,35 +4408,32 @@ impl BeaconChain { ); } - if let Some(event_handler) = self.event_handler.as_ref() { - if event_handler.has_block_subscribers() { - event_handler.register(EventKind::Block(SseBlock { - slot: block.slot(), - block: block_root, - execution_optimistic: payload_verification_status.is_optimistic(), - })); - } + if let Some(event_handler) = self.event_handler.as_ref() + && event_handler.has_block_subscribers() + { + event_handler.register(EventKind::Block(SseBlock { + slot: block.slot(), + block: block_root, + execution_optimistic: payload_verification_status.is_optimistic(), + })); } // Do not trigger light_client server update producer for old blocks, to extra work // during sync. 
if self.config.enable_light_client_server && block_delay_total < self.slot_clock.slot_duration() * 32 + && let Some(mut light_client_server_tx) = self.light_client_server_tx.clone() + && let Ok(sync_aggregate) = block.body().sync_aggregate() + && let Err(e) = light_client_server_tx.try_send(( + block.parent_root(), + block.slot(), + sync_aggregate.clone(), + )) { - if let Some(mut light_client_server_tx) = self.light_client_server_tx.clone() { - if let Ok(sync_aggregate) = block.body().sync_aggregate() { - if let Err(e) = light_client_server_tx.try_send(( - block.parent_root(), - block.slot(), - sync_aggregate.clone(), - )) { - warn!( - error = ?e, - "Failed to send light_client server event" - ); - } - } - } + warn!( + error = ?e, + "Failed to send light_client server event" + ); } } @@ -6066,21 +6058,21 @@ impl BeaconChain { }; // Push a server-sent event (probably to a block builder or relay). - if let Some(event_handler) = &self.event_handler { - if event_handler.has_payload_attributes_subscribers() { - event_handler.register(EventKind::PayloadAttributes(ForkVersionedResponse { - data: SseExtendedPayloadAttributes { - proposal_slot: prepare_slot, - proposer_index: proposer, - parent_block_root: head_root, - parent_block_number: pre_payload_attributes.parent_block_number, - parent_block_hash: forkchoice_update_params.head_hash.unwrap_or_default(), - payload_attributes: payload_attributes.into(), - }, - metadata: Default::default(), - version: self.spec.fork_name_at_slot::(prepare_slot), - })); - } + if let Some(event_handler) = &self.event_handler + && event_handler.has_payload_attributes_subscribers() + { + event_handler.register(EventKind::PayloadAttributes(ForkVersionedResponse { + data: SseExtendedPayloadAttributes { + proposal_slot: prepare_slot, + proposer_index: proposer, + parent_block_root: head_root, + parent_block_number: pre_payload_attributes.parent_block_number, + parent_block_hash: forkchoice_update_params.head_hash.unwrap_or_default(), + 
payload_attributes: payload_attributes.into(), + }, + metadata: Default::default(), + version: self.spec.fork_name_at_slot::(prepare_slot), + })); } let Some(till_prepare_slot) = self.slot_clock.duration_to_slot(prepare_slot) else { @@ -6540,14 +6532,14 @@ impl BeaconChain { self.task_executor.clone().spawn_blocking( move || { // Signal block proposal for the next slot (if it happens to be waiting). - if let Some(tx) = &chain.fork_choice_signal_tx { - if let Err(e) = tx.notify_fork_choice_complete(slot) { - warn!( - error = ?e, - %slot, - "Error signalling fork choice waiter" - ); - } + if let Some(tx) = &chain.fork_choice_signal_tx + && let Err(e) = tx.notify_fork_choice_complete(slot) + { + warn!( + error = ?e, + %slot, + "Error signalling fork choice waiter" + ); } }, "per_slot_task_fc_signal_tx", @@ -6867,10 +6859,9 @@ impl BeaconChain { .canonical_head .fork_choice_read_lock() .get_block_execution_status(parent_root) + && execution_status.is_strictly_optimistic() { - if execution_status.is_strictly_optimistic() { - return Ok(ChainHealth::Optimistic); - } + return Ok(ChainHealth::Optimistic); } if self.config.builder_fallback_disable_checks { diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index a6aedda19d0..8a2c0be0c89 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -4,7 +4,7 @@ //! Additionally, the `BalancesCache` struct is defined; a cache designed to avoid database //! reads when fork choice requires the validator balances of the justified state. 
-use crate::{metrics, BeaconSnapshot}; +use crate::{BeaconSnapshot, metrics}; use derivative::Derivative; use fork_choice::ForkChoiceStore; use proto_array::JustifiedBalances; diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index 3ac30b74619..2ba20d5a82c 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -5,12 +5,12 @@ use std::sync::Arc; use crate::beacon_chain::{BeaconChain, BeaconChainTypes}; use crate::block_verification::{ - cheap_state_advance_to_obtain_committees, get_validator_pubkey_cache, process_block_slash_info, - BlockSlashInfo, + BlockSlashInfo, cheap_state_advance_to_obtain_committees, get_validator_pubkey_cache, + process_block_slash_info, }; use crate::kzg_utils::{validate_blob, validate_blobs}; use crate::observed_data_sidecars::{ObservationStrategy, Observe}; -use crate::{metrics, BeaconChainError}; +use crate::{BeaconChainError, metrics}; use kzg::{Error as KzgError, Kzg, KzgCommitment}; use ssz_derive::{Decode, Encode}; use std::time::Duration; diff --git a/beacon_node/beacon_chain/src/block_reward.rs b/beacon_node/beacon_chain/src/block_reward.rs index 0809ce34ef0..f3924bb4733 100644 --- a/beacon_node/beacon_chain/src/block_reward.rs +++ b/beacon_node/beacon_chain/src/block_reward.rs @@ -1,7 +1,7 @@ use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2::lighthouse::{AttestationRewards, BlockReward, BlockRewardMeta}; use operation_pool::{ - AttMaxCover, MaxCover, RewardCache, SplitAttestation, PROPOSER_REWARD_DENOMINATOR, + AttMaxCover, MaxCover, PROPOSER_REWARD_DENOMINATOR, RewardCache, SplitAttestation, }; use state_processing::{ common::get_attesting_indices_from_state, diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 39d0a4b280f..6056fac2b81 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ 
b/beacon_node/beacon_chain/src/block_verification.rs @@ -54,16 +54,17 @@ use crate::block_verification_types::{AsBlock, BlockImportData, RpcBlock}; use crate::data_availability_checker::{AvailabilityCheckError, MaybeAvailableBlock}; use crate::data_column_verification::GossipDataColumnError; use crate::execution_payload::{ - validate_execution_payload_for_gossip, validate_merge_block, AllowOptimisticImport, - NotifyExecutionLayer, PayloadNotifier, + AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier, + validate_execution_payload_for_gossip, validate_merge_block, }; use crate::kzg_utils::blobs_to_data_column_sidecars; use crate::observed_block_producers::SeenBlock; use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::{ + BeaconChain, BeaconChainError, BeaconChainTypes, beacon_chain::{BeaconForkChoice, ForkChoiceError}, - metrics, BeaconChain, BeaconChainError, BeaconChainTypes, + metrics, }; use derivative::Derivative; use eth2::types::{BlockGossip, EventKind}; @@ -78,11 +79,11 @@ use ssz::Encode; use ssz_derive::{Decode, Encode}; use state_processing::per_block_processing::{errors::IntoWithIndex, is_merge_transition_block}; use state_processing::{ + AllCaches, BlockProcessingError, BlockSignatureStrategy, ConsensusContext, SlotProcessingError, + VerifyBlockRoot, block_signature_verifier::{BlockSignatureVerifier, Error as BlockSignatureVerifierError}, per_block_processing, per_slot_processing, state_advance::partial_state_advance, - AllCaches, BlockProcessingError, BlockSignatureStrategy, ConsensusContext, SlotProcessingError, - VerifyBlockRoot, }; use std::borrow::Cow; use std::fmt::Debug; @@ -92,12 +93,12 @@ use std::sync::Arc; use store::{Error as DBError, KeyValueStore}; use strum::AsRefStr; use task_executor::JoinHandle; -use tracing::{debug, debug_span, error, info_span, instrument, Instrument, Span}; +use tracing::{Instrument, Span, debug, debug_span, 
error, info_span, instrument}; use types::{ - data_column_sidecar::DataColumnSidecarError, BeaconBlockRef, BeaconState, BeaconStateError, - BlobsList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, ExecutionBlockHash, FullPayload, - Hash256, InconsistentFork, KzgProofs, PublicKey, PublicKeyBytes, RelativeEpoch, - SignedBeaconBlock, SignedBeaconBlockHeader, Slot, + BeaconBlockRef, BeaconState, BeaconStateError, BlobsList, ChainSpec, DataColumnSidecarList, + Epoch, EthSpec, ExecutionBlockHash, FullPayload, Hash256, InconsistentFork, KzgProofs, + PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, + data_column_sidecar::DataColumnSidecarError, }; pub const POS_PANDA_BANNER: &str = r#" @@ -1043,7 +1044,7 @@ impl GossipVerifiedBlock { return Err(BlockError::Slashable); } SeenBlock::Duplicate => { - return Err(BlockError::DuplicateImportStatusUnknown(block_root)) + return Err(BlockError::DuplicateImportStatusUnknown(block_root)); } SeenBlock::UniqueNonSlashable => {} }; @@ -1059,13 +1060,13 @@ impl GossipVerifiedBlock { validate_execution_payload_for_gossip(&parent_block, block.message(), chain)?; // Beacon API block_gossip events - if let Some(event_handler) = chain.event_handler.as_ref() { - if event_handler.has_block_gossip_subscribers() { - event_handler.register(EventKind::BlockGossip(Box::new(BlockGossip { - slot: block.slot(), - block: block_root, - }))); - } + if let Some(event_handler) = chain.event_handler.as_ref() + && event_handler.has_block_gossip_subscribers() + { + event_handler.register(EventKind::BlockGossip(Box::new(BlockGossip { + slot: block.slot(), + block: block_root, + }))); } // Having checked the proposer index and the block root we can cache them. @@ -1593,18 +1594,18 @@ impl ExecutionPendingBlock { * If we have block reward listeners, compute the block reward and push it to the * event handler. 
*/ - if let Some(ref event_handler) = chain.event_handler { - if event_handler.has_block_reward_subscribers() { - let mut reward_cache = Default::default(); - let block_reward = chain.compute_block_reward( - block.message(), - block_root, - &state, - &mut reward_cache, - true, - )?; - event_handler.register(EventKind::BlockReward(block_reward)); - } + if let Some(ref event_handler) = chain.event_handler + && event_handler.has_block_reward_subscribers() + { + let mut reward_cache = Default::default(); + let block_reward = chain.compute_block_reward( + block.message(), + block_root, + &state, + &mut reward_cache, + true, + )?; + event_handler.register(EventKind::BlockReward(block_reward)); } /* diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index 5917e6f6bea..5841eacf581 100644 --- a/beacon_node/beacon_chain/src/block_verification_types.rs +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -1,7 +1,7 @@ use crate::data_availability_checker::AvailabilityCheckError; pub use crate::data_availability_checker::{AvailableBlock, MaybeAvailableBlock}; use crate::data_column_verification::{CustodyDataColumn, CustodyDataColumnList}; -use crate::{get_block_root, PayloadVerificationOutcome}; +use crate::{PayloadVerificationOutcome, get_block_root}; use derivative::Derivative; use state_processing::ConsensusContext; use std::fmt::{Debug, Formatter}; diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index c46cc015c9c..3e5e7a1eeef 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -1,5 +1,7 @@ +use crate::ChainConfig; +use crate::CustodyContext; use crate::beacon_chain::{ - CanonicalHead, LightClientProducerEvent, BEACON_CHAIN_DB_KEY, OP_POOL_DB_KEY, + BEACON_CHAIN_DB_KEY, CanonicalHead, LightClientProducerEvent, OP_POOL_DB_KEY, }; use crate::beacon_proposer_cache::BeaconProposerCache; use 
crate::data_availability_checker::DataAvailabilityChecker; @@ -15,8 +17,6 @@ use crate::persisted_custody::load_custody_context; use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; use crate::validator_monitor::{ValidatorMonitor, ValidatorMonitorConfig}; use crate::validator_pubkey_cache::ValidatorPubkeyCache; -use crate::ChainConfig; -use crate::CustodyContext; use crate::{ BeaconChain, BeaconChainTypes, BeaconForkChoiceStore, BeaconSnapshot, ServerSentEventHandler, }; @@ -32,7 +32,7 @@ use rand::RngCore; use rayon::prelude::*; use slasher::Slasher; use slot_clock::{SlotClock, TestingSlotClock}; -use state_processing::{per_slot_processing, AllCaches}; +use state_processing::{AllCaches, per_slot_processing}; use std::marker::PhantomData; use std::sync::Arc; use std::time::Duration; @@ -1037,23 +1037,25 @@ where .map_err(|e| format!("Failed to prime attester cache: {:?}", e))?; // Only perform the check if it was configured. - if let Some(wss_checkpoint) = beacon_chain.config.weak_subjectivity_checkpoint { - if let Err(e) = beacon_chain.verify_weak_subjectivity_checkpoint( + if let Some(wss_checkpoint) = beacon_chain.config.weak_subjectivity_checkpoint + && let Err(e) = beacon_chain.verify_weak_subjectivity_checkpoint( wss_checkpoint, head.beacon_block_root, &head.beacon_state, - ) { - crit!( - head_block_root = %head.beacon_block_root, - head_slot = %head.beacon_block.slot(), - finalized_epoch = %head.beacon_state.finalized_checkpoint().epoch, - wss_checkpoint_epoch = %wss_checkpoint.epoch, - error = ?e, - "Weak subjectivity checkpoint verification failed on startup!" - ); - crit!("You must use the `--purge-db` flag to clear the database and restart sync. 
You may be on a hostile network."); - return Err(format!("Weak subjectivity verification failed: {:?}", e)); - } + ) + { + crit!( + head_block_root = %head.beacon_block_root, + head_slot = %head.beacon_block.slot(), + finalized_epoch = %head.beacon_state.finalized_checkpoint().epoch, + wss_checkpoint_epoch = %wss_checkpoint.epoch, + error = ?e, + "Weak subjectivity checkpoint verification failed on startup!" + ); + crit!( + "You must use the `--purge-db` flag to clear the database and restart sync. You may be on a hostile network." + ); + return Err(format!("Weak subjectivity verification failed: {:?}", e)); } info!( @@ -1198,13 +1200,13 @@ fn build_data_columns_from_blobs( #[cfg(test)] mod test { use super::*; - use crate::test_utils::{get_kzg, EphemeralHarnessType}; + use crate::test_utils::{EphemeralHarnessType, get_kzg}; use ethereum_hashing::hash; use genesis::{ - generate_deterministic_keypairs, interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH, + DEFAULT_ETH1_BLOCK_HASH, generate_deterministic_keypairs, interop_genesis_state, }; - use rand::rngs::StdRng; use rand::SeedableRng; + use rand::rngs::StdRng; use ssz::Encode; use std::time::Duration; use store::config::StoreConfig; diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 238262bd7fb..493baf513e8 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -34,12 +34,12 @@ use crate::persisted_fork_choice::PersistedForkChoice; use crate::shuffling_cache::BlockShufflingIds; use crate::{ - beacon_chain::{BeaconForkChoice, BeaconStore, OverrideForkchoiceUpdate, FORK_CHOICE_DB_KEY}, + BeaconChain, BeaconChainError as Error, BeaconChainTypes, BeaconSnapshot, + beacon_chain::{BeaconForkChoice, BeaconStore, FORK_CHOICE_DB_KEY, OverrideForkchoiceUpdate}, block_times_cache::BlockTimesCache, events::ServerSentEventHandler, metrics, validator_monitor::get_slot_delay_ms, - BeaconChain, BeaconChainError 
as Error, BeaconChainTypes, BeaconSnapshot, }; use eth2::types::{EventKind, SseChainReorg, SseFinalizedCheckpoint, SseHead, SseLateHead}; use fork_choice::{ @@ -53,7 +53,7 @@ use slot_clock::SlotClock; use state_processing::AllCaches; use std::sync::Arc; use std::time::Duration; -use store::{iter::StateRootsIterator, KeyValueStore, KeyValueStoreOp, StoreItem}; +use store::{KeyValueStore, KeyValueStoreOp, StoreItem, iter::StateRootsIterator}; use task_executor::{JoinHandle, ShutdownReason}; use tracing::{debug, error, info, instrument, warn}; use types::*; @@ -719,15 +719,14 @@ impl BeaconChain { let old_snapshot = &old_cached_head.snapshot; // If the head changed, perform some updates. - if new_snapshot.beacon_block_root != old_snapshot.beacon_block_root { - if let Err(e) = + if new_snapshot.beacon_block_root != old_snapshot.beacon_block_root + && let Err(e) = self.after_new_head(&old_cached_head, &new_cached_head, new_head_proto_block) - { - crit!( - error = ?e, - "Error updating canonical head" - ); - } + { + crit!( + error = ?e, + "Error updating canonical head" + ); } // Drop the old cache head nice and early to try and free the memory as soon as possible. @@ -737,15 +736,14 @@ impl BeaconChain { // // The `after_finalization` function will take a write-lock on `fork_choice`, therefore it // is a dead-lock risk to hold any other lock on fork choice at this point. - if new_view.finalized_checkpoint != old_view.finalized_checkpoint { - if let Err(e) = + if new_view.finalized_checkpoint != old_view.finalized_checkpoint + && let Err(e) = self.after_finalization(&new_cached_head, new_view, finalized_proto_block) - { - crit!( - error = ?e, - "Error updating finalization" - ); - } + { + crit!( + error = ?e, + "Error updating finalization" + ); } // The execution layer updates might attempt to take a write-lock on fork choice, so it's @@ -873,23 +871,22 @@ impl BeaconChain { } // Register a server-sent-event for a reorg (if necessary). 
- if let Some(depth) = reorg_distance { - if let Some(event_handler) = self + if let Some(depth) = reorg_distance + && let Some(event_handler) = self .event_handler .as_ref() .filter(|handler| handler.has_reorg_subscribers()) - { - event_handler.register(EventKind::ChainReorg(SseChainReorg { - slot: head_slot, - depth: depth.as_u64(), - old_head_block: old_snapshot.beacon_block_root, - old_head_state: old_snapshot.beacon_state_root(), - new_head_block: new_snapshot.beacon_block_root, - new_head_state: new_snapshot.beacon_state_root(), - epoch: head_slot.epoch(T::EthSpec::slots_per_epoch()), - execution_optimistic: new_head_is_optimistic, - })); - } + { + event_handler.register(EventKind::ChainReorg(SseChainReorg { + slot: head_slot, + depth: depth.as_u64(), + old_head_block: old_snapshot.beacon_block_root, + old_head_state: old_snapshot.beacon_state_root(), + new_head_block: new_snapshot.beacon_block_root, + new_head_state: new_snapshot.beacon_state_root(), + epoch: head_slot.epoch(T::EthSpec::slots_per_epoch()), + execution_optimistic: new_head_is_optimistic, + })); } Ok(()) @@ -943,18 +940,18 @@ impl BeaconChain { self.attester_cache .prune_below(new_view.finalized_checkpoint.epoch); - if let Some(event_handler) = self.event_handler.as_ref() { - if event_handler.has_finalized_subscribers() { - event_handler.register(EventKind::FinalizedCheckpoint(SseFinalizedCheckpoint { - epoch: new_view.finalized_checkpoint.epoch, - block: new_view.finalized_checkpoint.root, - // Provide the state root of the latest finalized block, rather than the - // specific state root at the first slot of the finalized epoch (which - // might be a skip slot). 
- state: finalized_proto_block.state_root, - execution_optimistic: finalized_block_is_optimistic, - })); - } + if let Some(event_handler) = self.event_handler.as_ref() + && event_handler.has_finalized_subscribers() + { + event_handler.register(EventKind::FinalizedCheckpoint(SseFinalizedCheckpoint { + epoch: new_view.finalized_checkpoint.epoch, + block: new_view.finalized_checkpoint.root, + // Provide the state root of the latest finalized block, rather than the + // specific state root at the first slot of the finalized epoch (which + // might be a skip slot). + state: finalized_proto_block.state_root, + execution_optimistic: finalized_block_is_optimistic, + })); } // The store migration task requires the *state at the slot of the finalized epoch*, @@ -1440,23 +1437,23 @@ fn observe_head_block_delays( set_as_head_time_ms = format_delay(&block_delays.set_as_head), "Delayed head block" ); - if let Some(event_handler) = event_handler { - if event_handler.has_late_head_subscribers() { - let peer_info = block_times_cache.get_peer_info(head_block_root); - event_handler.register(EventKind::LateHead(SseLateHead { - slot: head_block_slot, - block: head_block_root, - peer_id: peer_info.id, - peer_client: peer_info.client, - proposer_index: head_block_proposer_index, - proposer_graffiti: head_block_graffiti, - block_delay: block_delay_total, - observed_delay: block_delays.observed, - imported_delay: block_delays.imported, - set_as_head_delay: block_delays.set_as_head, - execution_optimistic: head_block_is_optimistic, - })); - } + if let Some(event_handler) = event_handler + && event_handler.has_late_head_subscribers() + { + let peer_info = block_times_cache.get_peer_info(head_block_root); + event_handler.register(EventKind::LateHead(SseLateHead { + slot: head_block_slot, + block: head_block_root, + peer_id: peer_info.id, + peer_client: peer_info.client, + proposer_index: head_block_proposer_index, + proposer_graffiti: head_block_graffiti, + block_delay: block_delay_total, + 
observed_delay: block_delays.observed, + imported_delay: block_delays.imported, + set_as_head_delay: block_delays.set_as_head, + execution_optimistic: head_block_is_optimistic, + })); } } else { debug!( diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 5e8c3aab35f..9918ec585a1 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -1,5 +1,5 @@ use crate::blob_verification::{ - verify_kzg_for_blob_list, GossipVerifiedBlob, KzgVerifiedBlob, KzgVerifiedBlobList, + GossipVerifiedBlob, KzgVerifiedBlob, KzgVerifiedBlobList, verify_kzg_for_blob_list, }; use crate::block_verification_types::{ AvailabilityPendingExecutedBlock, AvailableExecutedBlock, RpcBlock, @@ -7,7 +7,7 @@ use crate::block_verification_types::{ use crate::data_availability_checker::overflow_lru_cache::{ DataAvailabilityCheckerInner, ReconstructColumnsDecision, }; -use crate::{metrics, BeaconChain, BeaconChainTypes, BeaconStore, CustodyContext}; +use crate::{BeaconChain, BeaconChainTypes, BeaconStore, CustodyContext, metrics}; use kzg::Kzg; use slot_clock::SlotClock; use std::fmt; @@ -28,8 +28,8 @@ mod overflow_lru_cache; mod state_lru_cache; use crate::data_column_verification::{ - verify_kzg_for_data_column_list_with_scoring, CustodyDataColumn, GossipVerifiedDataColumn, - KzgVerifiedCustodyDataColumn, KzgVerifiedDataColumn, + CustodyDataColumn, GossipVerifiedDataColumn, KzgVerifiedCustodyDataColumn, + KzgVerifiedDataColumn, verify_kzg_for_data_column_list_with_scoring, }; use crate::metrics::{ KZG_DATA_COLUMN_RECONSTRUCTION_ATTEMPTS, KZG_DATA_COLUMN_RECONSTRUCTION_FAILURES, @@ -837,13 +837,13 @@ impl MaybeAvailableBlock { #[cfg(test)] mod test { use super::*; + use crate::CustodyContext; use crate::test_utils::{ - generate_rand_block_and_data_columns, get_kzg, EphemeralHarnessType, NumBlobs, + EphemeralHarnessType, NumBlobs, 
generate_rand_block_and_data_columns, get_kzg, }; - use crate::CustodyContext; + use rand::SeedableRng; use rand::prelude::StdRng; use rand::seq::SliceRandom; - use rand::SeedableRng; use slot_clock::{SlotClock, TestingSlotClock}; use std::collections::HashSet; use std::sync::Arc; diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index e2183cd950f..bc2b2175352 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -1,5 +1,7 @@ -use super::state_lru_cache::{DietAvailabilityPendingExecutedBlock, StateLRUCache}; use super::AvailableBlockData; +use super::state_lru_cache::{DietAvailabilityPendingExecutedBlock, StateLRUCache}; +use crate::BeaconChainTypes; +use crate::CustodyContext; use crate::beacon_chain::BeaconStore; use crate::blob_verification::KzgVerifiedBlob; use crate::block_verification_types::{ @@ -7,14 +9,12 @@ use crate::block_verification_types::{ }; use crate::data_availability_checker::{Availability, AvailabilityCheckError}; use crate::data_column_verification::KzgVerifiedCustodyDataColumn; -use crate::BeaconChainTypes; -use crate::CustodyContext; use lru::LruCache; use parking_lot::RwLock; use std::cmp::Ordering; use std::num::NonZeroUsize; use std::sync::Arc; -use tracing::{debug, debug_span, Span}; +use tracing::{Span, debug, debug_span}; use types::blob_sidecar::BlobIdentifier; use types::{ BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, Epoch, EthSpec, @@ -124,10 +124,10 @@ impl PendingComponents { pub fn merge_single_blob(&mut self, index: usize, blob: KzgVerifiedBlob) { if let Some(cached_block) = self.get_cached_block() { let block_commitment_opt = cached_block.get_commitments().get(index).copied(); - if let Some(block_commitment) = block_commitment_opt { - if block_commitment == 
*blob.get_commitment() { - self.insert_blob_at_index(index, blob) - } + if let Some(block_commitment) = block_commitment_opt + && block_commitment == *blob.get_commitment() + { + self.insert_blob_at_index(index, blob) } } else if !self.blob_exists(index) { self.insert_blob_at_index(index, blob) @@ -683,10 +683,10 @@ impl DataAvailabilityCheckerInner { let mut write_lock = self.critical.write(); let mut keys_to_remove = vec![]; for (key, value) in write_lock.iter() { - if let Some(epoch) = value.epoch() { - if epoch < cutoff_epoch { - keys_to_remove.push(*key); - } + if let Some(epoch) = value.epoch() + && epoch < cutoff_epoch + { + keys_to_remove.push(*key); } } // Now remove keys @@ -729,8 +729,8 @@ mod test { use logging::create_test_tracing_subscriber; use state_processing::ConsensusContext; use std::collections::VecDeque; - use store::{database::interface::BeaconNodeBackend, HotColdDB, ItemStore, StoreConfig}; - use tempfile::{tempdir, TempDir}; + use store::{HotColdDB, ItemStore, StoreConfig, database::interface::BeaconNodeBackend}; + use tempfile::{TempDir, tempdir}; use tracing::{debug_span, info}; use types::non_zero_usize::new_non_zero_usize; use types::{ExecPayload, MinimalEthSpec}; @@ -905,10 +905,10 @@ mod test { where E: EthSpec, T: BeaconChainTypes< - HotStore = BeaconNodeBackend, - ColdStore = BeaconNodeBackend, - EthSpec = E, - >, + HotStore = BeaconNodeBackend, + ColdStore = BeaconNodeBackend, + EthSpec = E, + >, { create_test_tracing_subscriber(); let chain_db_path = tempdir().expect("should get temp dir"); @@ -1174,13 +1174,13 @@ mod test { #[cfg(test)] mod pending_components_tests { use super::*; - use crate::block_verification_types::BlockImportData; - use crate::test_utils::{generate_rand_block_and_blobs, test_spec, NumBlobs}; use crate::PayloadVerificationOutcome; + use crate::block_verification_types::BlockImportData; + use crate::test_utils::{NumBlobs, generate_rand_block_and_blobs, test_spec}; use fork_choice::PayloadVerificationStatus; 
use kzg::KzgCommitment; - use rand::rngs::StdRng; use rand::SeedableRng; + use rand::rngs::StdRng; use state_processing::ConsensusContext; use types::test_utils::TestRandom; use types::{ diff --git a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs index 547bb160be7..e328bd9b9c6 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs @@ -1,15 +1,15 @@ use crate::block_verification_types::AsBlock; use crate::{ + AvailabilityPendingExecutedBlock, BeaconChainTypes, BeaconStore, PayloadVerificationOutcome, block_verification_types::BlockImportData, data_availability_checker::{AvailabilityCheckError, STATE_LRU_CAPACITY_NON_ZERO}, - AvailabilityPendingExecutedBlock, BeaconChainTypes, BeaconStore, PayloadVerificationOutcome, }; use lru::LruCache; use parking_lot::RwLock; use state_processing::BlockReplayer; use std::sync::Arc; use store::OnDiskConsensusContext; -use tracing::{debug_span, instrument, Span}; +use tracing::{Span, debug_span, instrument}; use types::beacon_block_body::KzgCommitments; use types::{BeaconState, BlindedPayload, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock}; diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index 26d08e1dc6c..3b444860e07 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -1,11 +1,11 @@ use crate::beacon_proposer_cache::EpochBlockProposers; use crate::block_verification::{ - cheap_state_advance_to_obtain_committees, get_validator_pubkey_cache, process_block_slash_info, - BlockSlashInfo, + BlockSlashInfo, cheap_state_advance_to_obtain_committees, get_validator_pubkey_cache, + process_block_slash_info, }; use crate::kzg_utils::{reconstruct_data_columns, 
validate_data_columns}; use crate::observed_data_sidecars::{ObservationStrategy, Observe}; -use crate::{metrics, BeaconChain, BeaconChainError, BeaconChainTypes}; +use crate::{BeaconChain, BeaconChainError, BeaconChainTypes, metrics}; use derivative::Derivative; use fork_choice::ProtoBlock; use kzg::{Error as KzgError, Kzg}; @@ -819,7 +819,7 @@ pub fn observe_gossip_data_column( #[cfg(test)] mod test { use crate::data_column_verification::{ - validate_data_column_sidecar_for_gossip, GossipDataColumnError, + GossipDataColumnError, validate_data_column_sidecar_for_gossip, }; use crate::observed_data_sidecars::Observe; use crate::test_utils::BeaconChainHarness; diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index b6db3fa84f2..a1a0ec74f66 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -16,6 +16,7 @@ use operation_pool::OpPoolError; use safe_arith::ArithError; use ssz_types::Error as SszTypesError; use state_processing::{ + BlockProcessingError, BlockReplayError, EpochProcessingError, SlotProcessingError, block_signature_verifier::Error as BlockSignatureVerifierError, per_block_processing::errors::{ AttestationValidationError, AttesterSlashingValidationError, @@ -24,7 +25,6 @@ use state_processing::{ }, signature_sets::Error as SignatureSetError, state_advance::Error as StateAdvanceError, - BlockProcessingError, BlockReplayError, EpochProcessingError, SlotProcessingError, }; use task_executor::ShutdownReason; use tokio::task::JoinError; diff --git a/beacon_node/beacon_chain/src/events.rs b/beacon_node/beacon_chain/src/events.rs index 94ebfb46557..63be944eea2 100644 --- a/beacon_node/beacon_chain/src/events.rs +++ b/beacon_node/beacon_chain/src/events.rs @@ -1,6 +1,6 @@ pub use eth2::types::{EventKind, SseBlock, SseFinalizedCheckpoint, SseHead}; use tokio::sync::broadcast; -use tokio::sync::broadcast::{error::SendError, Receiver, Sender}; +use 
tokio::sync::broadcast::{Receiver, Sender, error::SendError}; use tracing::trace; use types::EthSpec; diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index aa98310c121..697fee351e1 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -310,7 +310,7 @@ pub fn validate_execution_payload_for_gossip( ExecutionStatus::Invalid(_) => { return Err(BlockError::ParentExecutionPayloadInvalid { parent_root: parent_block.root, - }) + }); } }; diff --git a/beacon_node/beacon_chain/src/fetch_blobs/mod.rs b/beacon_node/beacon_chain/src/fetch_blobs/mod.rs index 09c3b30d1cf..4c6b2d10a95 100644 --- a/beacon_node/beacon_chain/src/fetch_blobs/mod.rs +++ b/beacon_node/beacon_chain/src/fetch_blobs/mod.rs @@ -21,18 +21,18 @@ use crate::kzg_utils::blobs_to_data_column_sidecars; use crate::observed_block_producers::ProposalKey; use crate::validator_monitor::timestamp_now; use crate::{ - metrics, AvailabilityProcessingStatus, BeaconChain, BeaconChainError, BeaconChainTypes, - BlockError, + AvailabilityProcessingStatus, BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, + metrics, }; -use execution_layer::json_structures::{BlobAndProofV1, BlobAndProofV2}; use execution_layer::Error as ExecutionLayerError; -use metrics::{inc_counter, TryExt}; +use execution_layer::json_structures::{BlobAndProofV1, BlobAndProofV2}; +use metrics::{TryExt, inc_counter}; #[cfg(test)] use mockall_double::double; use ssz_types::FixedVector; use state_processing::per_block_processing::deneb::kzg_commitment_to_versioned_hash; use std::sync::Arc; -use tracing::{debug, instrument, warn, Span}; +use tracing::{Span, debug, instrument, warn}; use types::blob_sidecar::BlobSidecarError; use types::data_column_sidecar::DataColumnSidecarError; use types::{ diff --git a/beacon_node/beacon_chain/src/fetch_blobs/tests.rs b/beacon_node/beacon_chain/src/fetch_blobs/tests.rs index 
a59db19fca9..e4855dd5598 100644 --- a/beacon_node/beacon_chain/src/fetch_blobs/tests.rs +++ b/beacon_node/beacon_chain/src/fetch_blobs/tests.rs @@ -1,9 +1,9 @@ +use crate::AvailabilityProcessingStatus; use crate::fetch_blobs::fetch_blobs_beacon_adapter::MockFetchBlobsBeaconAdapter; use crate::fetch_blobs::{ - fetch_and_process_engine_blobs_inner, EngineGetBlobsOutput, FetchEngineBlobError, + EngineGetBlobsOutput, FetchEngineBlobError, fetch_and_process_engine_blobs_inner, }; -use crate::test_utils::{get_kzg, EphemeralHarnessType}; -use crate::AvailabilityProcessingStatus; +use crate::test_utils::{EphemeralHarnessType, get_kzg}; use bls::Signature; use eth2::types::BlobsBundle; use execution_layer::json_structures::{BlobAndProof, BlobAndProofV1, BlobAndProofV2}; diff --git a/beacon_node/beacon_chain/src/fork_revert.rs b/beacon_node/beacon_chain/src/fork_revert.rs index cde2950c89b..b0cb3b5d9d3 100644 --- a/beacon_node/beacon_chain/src/fork_revert.rs +++ b/beacon_node/beacon_chain/src/fork_revert.rs @@ -3,12 +3,12 @@ use fork_choice::{ForkChoice, PayloadVerificationStatus}; use itertools::process_results; use state_processing::state_advance::complete_state_advance; use state_processing::{ - per_block_processing, per_block_processing::BlockSignatureStrategy, ConsensusContext, - VerifyBlockRoot, + ConsensusContext, VerifyBlockRoot, per_block_processing, + per_block_processing::BlockSignatureStrategy, }; use std::sync::Arc; use std::time::Duration; -use store::{iter::ParentRootBlockIterator, HotColdDB, ItemStore}; +use store::{HotColdDB, ItemStore, iter::ParentRootBlockIterator}; use tracing::{info, warn}; use types::{BeaconState, ChainSpec, EthSpec, ForkName, Hash256, SignedBeaconBlock, Slot}; diff --git a/beacon_node/beacon_chain/src/graffiti_calculator.rs b/beacon_node/beacon_chain/src/graffiti_calculator.rs index 23d1d69b1ca..e8110d14cdc 100644 --- a/beacon_node/beacon_chain/src/graffiti_calculator.rs +++ b/beacon_node/beacon_chain/src/graffiti_calculator.rs @@ 
-1,13 +1,13 @@ use crate::BeaconChain; use crate::BeaconChainTypes; -use execution_layer::{http::ENGINE_GET_CLIENT_VERSION_V1, CommitPrefix, ExecutionLayer}; +use execution_layer::{CommitPrefix, ExecutionLayer, http::ENGINE_GET_CLIENT_VERSION_V1}; use logging::crit; use serde::{Deserialize, Serialize}; use slot_clock::SlotClock; use std::{fmt::Debug, time::Duration}; use task_executor::TaskExecutor; use tracing::{debug, error, warn}; -use types::{EthSpec, Graffiti, GRAFFITI_BYTES_LEN}; +use types::{EthSpec, GRAFFITI_BYTES_LEN, Graffiti}; const ENGINE_VERSION_AGE_LIMIT_EPOCH_MULTIPLE: u32 = 6; // 6 epochs const ENGINE_VERSION_CACHE_REFRESH_EPOCH_MULTIPLE: u32 = 2; // 2 epochs @@ -84,7 +84,9 @@ impl GraffitiCalculator { let Some(execution_layer) = self.execution_layer.as_ref() else { // Return default graffiti if there is no execution layer. This // shouldn't occur if we're actually producing blocks. - crit!("No execution layer available for graffiti calculation during block production!"); + crit!( + "No execution layer available for graffiti calculation during block production!" + ); return default_graffiti; }; @@ -221,15 +223,15 @@ async fn engine_version_cache_refresh_service( #[cfg(test)] mod tests { - use crate::test_utils::{test_spec, BeaconChainHarness, EphemeralHarnessType}; use crate::ChainConfig; - use execution_layer::test_utils::{DEFAULT_CLIENT_VERSION, DEFAULT_ENGINE_CAPABILITIES}; + use crate::test_utils::{BeaconChainHarness, EphemeralHarnessType, test_spec}; use execution_layer::EngineCapabilities; + use execution_layer::test_utils::{DEFAULT_CLIENT_VERSION, DEFAULT_ENGINE_CAPABILITIES}; use std::sync::Arc; use std::sync::LazyLock; use std::time::Duration; use tracing::info; - use types::{ChainSpec, Graffiti, Keypair, MinimalEthSpec, GRAFFITI_BYTES_LEN}; + use types::{ChainSpec, GRAFFITI_BYTES_LEN, Graffiti, Keypair, MinimalEthSpec}; const VALIDATOR_COUNT: usize = 48; /// A cached set of keys. 
diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index e929d3e3c1a..8b9fb5e3549 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -1,9 +1,9 @@ use crate::data_availability_checker::{AvailableBlock, AvailableBlockData}; -use crate::{metrics, BeaconChain, BeaconChainTypes}; +use crate::{BeaconChain, BeaconChainTypes, metrics}; use itertools::Itertools; use state_processing::{ per_block_processing::ParallelSignatureSets, - signature_sets::{block_proposal_signature_set_from_parts, Error as SignatureSetError}, + signature_sets::{Error as SignatureSetError, block_proposal_signature_set_from_parts}, }; use std::borrow::Cow; use std::iter; @@ -237,30 +237,30 @@ impl BeaconChain { let mut anchor_and_blob_batch = Vec::with_capacity(3); // Update the blob info. - if new_oldest_blob_slot != blob_info.oldest_blob_slot { - if let Some(oldest_blob_slot) = new_oldest_blob_slot { - let new_blob_info = BlobInfo { - oldest_blob_slot: Some(oldest_blob_slot), - ..blob_info.clone() - }; - anchor_and_blob_batch.push( - self.store - .compare_and_set_blob_info(blob_info, new_blob_info)?, - ); - } + if new_oldest_blob_slot != blob_info.oldest_blob_slot + && let Some(oldest_blob_slot) = new_oldest_blob_slot + { + let new_blob_info = BlobInfo { + oldest_blob_slot: Some(oldest_blob_slot), + ..blob_info.clone() + }; + anchor_and_blob_batch.push( + self.store + .compare_and_set_blob_info(blob_info, new_blob_info)?, + ); } // Update the data column info. 
- if new_oldest_data_column_slot != data_column_info.oldest_data_column_slot { - if let Some(oldest_data_column_slot) = new_oldest_data_column_slot { - let new_data_column_info = DataColumnInfo { - oldest_data_column_slot: Some(oldest_data_column_slot), - }; - anchor_and_blob_batch.push( - self.store - .compare_and_set_data_column_info(data_column_info, new_data_column_info)?, - ); - } + if new_oldest_data_column_slot != data_column_info.oldest_data_column_slot + && let Some(oldest_data_column_slot) = new_oldest_data_column_slot + { + let new_data_column_info = DataColumnInfo { + oldest_data_column_slot: Some(oldest_data_column_slot), + }; + anchor_and_blob_batch.push( + self.store + .compare_and_set_data_column_info(data_column_info, new_data_column_info)?, + ); } // Update the anchor. diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index 18159a59e5d..b9beea5b7ca 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -1,6 +1,6 @@ use kzg::{ - Blob as KzgBlob, Bytes48, Cell as KzgCell, CellRef as KzgCellRef, CellsAndKzgProofs, - Error as KzgError, Kzg, CELLS_PER_EXT_BLOB, + Blob as KzgBlob, Bytes48, CELLS_PER_EXT_BLOB, Cell as KzgCell, CellRef as KzgCellRef, + CellsAndKzgProofs, Error as KzgError, Kzg, }; use rayon::prelude::*; use ssz_types::{FixedVector, VariableList}; @@ -423,10 +423,11 @@ mod test { use bls::Signature; use eth2::types::BlobsBundle; use execution_layer::test_utils::generate_blobs; - use kzg::{trusted_setup::get_trusted_setup, Kzg, KzgCommitment, TrustedSetup}; + use kzg::{Kzg, KzgCommitment, TrustedSetup, trusted_setup::get_trusted_setup}; use types::{ - beacon_block_body::KzgCommitments, BeaconBlock, BeaconBlockFulu, BlobsList, ChainSpec, - EmptyBlock, EthSpec, ForkName, FullPayload, KzgProofs, MainnetEthSpec, SignedBeaconBlock, + BeaconBlock, BeaconBlockFulu, BlobsList, ChainSpec, EmptyBlock, EthSpec, ForkName, + FullPayload, KzgProofs, 
MainnetEthSpec, SignedBeaconBlock, + beacon_block_body::KzgCommitments, }; type E = MainnetEthSpec; diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index df253bf72c0..e4d17ab831f 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -65,10 +65,9 @@ pub mod validator_pubkey_cache; pub use self::beacon_chain::{ AttestationProcessingOutcome, AvailabilityProcessingStatus, BeaconBlockResponse, BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, BeaconStore, BlockProcessStatus, - ChainSegmentResult, ForkChoiceError, LightClientProducerEvent, OverrideForkchoiceUpdate, + ChainSegmentResult, ForkChoiceError, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, + INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, LightClientProducerEvent, OverrideForkchoiceUpdate, ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped, - INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, - INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; pub use self::beacon_snapshot::BeaconSnapshot; pub use self::chain_config::ChainConfig; @@ -77,9 +76,10 @@ pub use self::historical_blocks::HistoricalBlockError; pub use attestation_verification::Error as AttestationError; pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceStoreError}; pub use block_verification::{ - build_blob_data_column_sidecars, get_block_root, BlockError, ExecutionPayloadError, - ExecutionPendingBlock, GossipVerifiedBlock, IntoExecutionPendingBlock, IntoGossipVerifiedBlock, - InvalidSignature, PayloadVerificationOutcome, PayloadVerificationStatus, + BlockError, ExecutionPayloadError, ExecutionPendingBlock, GossipVerifiedBlock, + IntoExecutionPendingBlock, IntoGossipVerifiedBlock, InvalidSignature, + PayloadVerificationOutcome, PayloadVerificationStatus, build_blob_data_column_sidecars, + get_block_root, }; pub use block_verification_types::AvailabilityPendingExecutedBlock; pub use block_verification_types::ExecutedBlock; 
diff --git a/beacon_node/beacon_chain/src/light_client_server_cache.rs b/beacon_node/beacon_chain/src/light_client_server_cache.rs index 22122ee5547..487ddfd3ec9 100644 --- a/beacon_node/beacon_chain/src/light_client_server_cache.rs +++ b/beacon_node/beacon_chain/src/light_client_server_cache.rs @@ -1,5 +1,5 @@ use crate::errors::BeaconChainError; -use crate::{metrics, BeaconChainTypes, BeaconStore}; +use crate::{BeaconChainTypes, BeaconStore, metrics}; use parking_lot::{Mutex, RwLock}; use safe_arith::SafeArith; use ssz::Decode; @@ -223,10 +223,9 @@ impl LightClientServerCache { ) -> Result<(), BeaconChainError> { if let Some(latest_sync_committee) = self.latest_written_current_sync_committee.read().clone() + && latest_sync_committee == cached_parts.current_sync_committee { - if latest_sync_committee == cached_parts.current_sync_committee { - return Ok(()); - } + return Ok(()); }; if finalized_period + 1 >= sync_committee_period { @@ -465,9 +464,9 @@ impl LightClientServerCache { }; if sync_committee_period > finalized_period { - return Err(BeaconChainError::LightClientBootstrapError( - format!("The blocks sync committee period {sync_committee_period} is greater than the current finalized period {finalized_period}"), - )); + return Err(BeaconChainError::LightClientBootstrapError(format!( + "The blocks sync committee period {sync_committee_period} is greater than the current finalized period {finalized_period}" + ))); } let Some(current_sync_committee) = store.get_sync_committee(sync_committee_period)? 
else { diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 2d37e380d5b..4471b0a93f1 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -408,9 +408,9 @@ pub static ATTESTATION_PROCESSING_BATCH_AGG_SIGNATURE_TIMES: LazyLock> = LazyLock::new(|| { try_create_histogram( - "beacon_attestation_processing_batch_unagg_signature_setup_times", - "Time spent on setting up for the signature verification of batch unaggregate processing" - ) + "beacon_attestation_processing_batch_unagg_signature_setup_times", + "Time spent on setting up for the signature verification of batch unaggregate processing", + ) }); pub static ATTESTATION_PROCESSING_BATCH_UNAGG_SIGNATURE_TIMES: LazyLock> = LazyLock::new(|| { @@ -826,17 +826,17 @@ pub static ATTN_OBSERVATION_PREV_EPOCH_AGGREGATORS: LazyLock> = pub static SYNC_COMM_OBSERVATION_PREV_SLOT_SIGNERS: LazyLock> = LazyLock::new( || { try_create_int_gauge( - "beacon_sync_comm_observation_slot_signers", - "Count of sync committee contributors that have been seen by the beacon chain in the previous slot" - ) + "beacon_sync_comm_observation_slot_signers", + "Count of sync committee contributors that have been seen by the beacon chain in the previous slot", + ) }, ); pub static SYNC_COMM_OBSERVATION_PREV_SLOT_AGGREGATORS: LazyLock> = LazyLock::new( || { try_create_int_gauge( - "beacon_sync_comm_observation_slot_aggregators", - "Count of sync committee aggregators that have been seen by the beacon chain in the previous slot" - ) + "beacon_sync_comm_observation_slot_aggregators", + "Count of sync committee aggregators that have been seen by the beacon chain in the previous slot", + ) }, ); @@ -997,10 +997,10 @@ pub static VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATIONS_MIN_DELAY_SECONDS: LazyLock Result, > = LazyLock::new(|| { try_create_histogram_vec( - "validator_monitor_prev_epoch_attestations_min_delay_seconds", - "The min delay between when the validator 
should send the attestation and when it was received.", - &["validator"] - ) + "validator_monitor_prev_epoch_attestations_min_delay_seconds", + "The min delay between when the validator should send the attestation and when it was received.", + &["validator"], + ) }); pub static VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATION_AGGREGATE_INCLUSIONS: LazyLock< Result, @@ -1058,10 +1058,10 @@ pub static VALIDATOR_MONITOR_PREV_EPOCH_AGGREGATES_MIN_DELAY_SECONDS: LazyLock< Result, > = LazyLock::new(|| { try_create_histogram_vec( - "validator_monitor_prev_epoch_aggregates_min_delay_seconds", - "The min delay between when the validator should send the aggregate and when it was received.", - &["validator"] - ) + "validator_monitor_prev_epoch_aggregates_min_delay_seconds", + "The min delay between when the validator should send the aggregate and when it was received.", + &["validator"], + ) }); pub static VALIDATOR_MONITOR_PREV_EPOCH_EXITS_TOTAL: LazyLock> = LazyLock::new(|| { @@ -1100,10 +1100,10 @@ pub static VALIDATOR_MONITOR_PREV_EPOCH_SYNC_COMMITTEE_MESSAGES_MIN_DELAY_SECOND Result, > = LazyLock::new(|| { try_create_histogram_vec( - "validator_monitor_prev_epoch_sync_committee_messages_min_delay_seconds", - "The min delay between when the validator should send the sync committee message and when it was received.", - &["validator"] - ) + "validator_monitor_prev_epoch_sync_committee_messages_min_delay_seconds", + "The min delay between when the validator should send the sync committee message and when it was received.", + &["validator"], + ) }); pub static VALIDATOR_MONITOR_PREV_EPOCH_SYNC_CONTRIBUTION_INCLUSIONS: LazyLock< Result, @@ -1135,10 +1135,10 @@ pub static VALIDATOR_MONITOR_PREV_EPOCH_SYNC_CONTRIBUTION_MIN_DELAY_SECONDS: Laz Result, > = LazyLock::new(|| { try_create_histogram_vec( - "validator_monitor_prev_epoch_sync_contribution_min_delay_seconds", - "The min delay between when the validator should send the sync contribution and when it was received.", - &["validator"] 
- ) + "validator_monitor_prev_epoch_sync_contribution_min_delay_seconds", + "The min delay between when the validator should send the sync contribution and when it was received.", + &["validator"], + ) }); pub static VALIDATOR_MONITOR_VALIDATOR_IN_CURRENT_SYNC_COMMITTEE: LazyLock> = LazyLock::new(|| { @@ -1172,7 +1172,7 @@ pub static VALIDATOR_MONITOR_UNAGGREGATED_ATTESTATION_DELAY_SECONDS: LazyLock< try_create_histogram_vec( "validator_monitor_unaggregated_attestation_delay_seconds", "The delay between when the validator should send the attestation and when it was received.", - &["src", "validator"] + &["src", "validator"], ) }); pub static VALIDATOR_MONITOR_SYNC_COMMITTEE_MESSAGES_TOTAL: LazyLock> = @@ -1186,10 +1186,10 @@ pub static VALIDATOR_MONITOR_SYNC_COMMITTEE_MESSAGES_TOTAL: LazyLock> = LazyLock::new(|| { try_create_histogram_vec( - "validator_monitor_sync_committee_messages_delay_seconds", - "The delay between when the validator should send the sync committee message and when it was received.", - &["src", "validator"] - ) + "validator_monitor_sync_committee_messages_delay_seconds", + "The delay between when the validator should send the sync committee message and when it was received.", + &["src", "validator"], + ) }); pub static VALIDATOR_MONITOR_SYNC_CONTRIBUTIONS_TOTAL: LazyLock> = LazyLock::new(|| { @@ -1202,10 +1202,10 @@ pub static VALIDATOR_MONITOR_SYNC_CONTRIBUTIONS_TOTAL: LazyLock> = LazyLock::new(|| { try_create_histogram_vec( - "validator_monitor_sync_contributions_delay_seconds", - "The delay between when the aggregator should send the sync contribution and when it was received.", - &["src", "validator"] - ) + "validator_monitor_sync_contributions_delay_seconds", + "The delay between when the aggregator should send the sync contribution and when it was received.", + &["src", "validator"], + ) }); pub static VALIDATOR_MONITOR_AGGREGATED_ATTESTATION_TOTAL: LazyLock> = LazyLock::new(|| { @@ -1218,10 +1218,10 @@ pub static 
VALIDATOR_MONITOR_AGGREGATED_ATTESTATION_TOTAL: LazyLock> = LazyLock::new(|| { try_create_histogram_vec( - "validator_monitor_aggregated_attestation_delay_seconds", - "The delay between then the validator should send the aggregate and when it was received.", - &["src", "validator"] - ) + "validator_monitor_aggregated_attestation_delay_seconds", + "The delay between then the validator should send the aggregate and when it was received.", + &["src", "validator"], + ) }); pub static VALIDATOR_MONITOR_ATTESTATION_IN_AGGREGATE_TOTAL: LazyLock> = LazyLock::new(|| { @@ -1269,10 +1269,10 @@ pub static VALIDATOR_MONITOR_SYNC_COMMITTEE_MESSAGE_IN_BLOCK_TOTAL: LazyLock< pub static VALIDATOR_MONITOR_ATTESTATION_IN_BLOCK_DELAY_SLOTS: LazyLock> = LazyLock::new(|| { try_create_int_gauge_vec( - "validator_monitor_attestation_in_block_delay_slots", - "The excess slots (beyond the minimum delay) between the attestation slot and the block slot.", - &["src", "validator"] - ) + "validator_monitor_attestation_in_block_delay_slots", + "The excess slots (beyond the minimum delay) between the attestation slot and the block slot.", + &["src", "validator"], + ) }); pub static VALIDATOR_MONITOR_BEACON_BLOCK_TOTAL: LazyLock> = LazyLock::new(|| { @@ -1380,20 +1380,21 @@ pub static BEACON_BLOCK_DELAY_IMPORTED_TIME: LazyLock> = LazyLo ) }); -pub static BEACON_BLOCK_DELAY_HEAD_IMPORTED_TIME: LazyLock> = - LazyLock::new(|| { +pub static BEACON_BLOCK_DELAY_HEAD_IMPORTED_TIME: LazyLock> = LazyLock::new( + || { try_create_int_gauge( - "beacon_block_delay_head_imported_time", - "Duration between the time that block was imported and the time when it was set as head.", - ) - }); + "beacon_block_delay_head_imported_time", + "Duration between the time that block was imported and the time when it was set as head.", + ) + }, +); pub static BEACON_BLOCK_DELAY_HEAD_SLOT_START_EXCEEDED_TOTAL: LazyLock> = LazyLock::new(|| { try_create_int_counter( - "beacon_block_delay_head_slot_start_exceeded_total", - "A 
counter that is triggered when the duration between the start of the block's slot and the current time \ + "beacon_block_delay_head_slot_start_exceeded_total", + "A counter that is triggered when the duration between the start of the block's slot and the current time \ will result in failed attestations.", - ) + ) }); /* diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index 09534fc4ccf..bd232f2e8a2 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -3,10 +3,10 @@ use crate::summaries_dag::{DAGStateSummary, Error as SummariesDagError, StateSum use parking_lot::Mutex; use std::collections::HashSet; use std::mem; -use std::sync::{mpsc, Arc}; +use std::sync::{Arc, mpsc}; use std::thread; use std::time::{Duration, SystemTime, UNIX_EPOCH}; -use store::hot_cold_store::{migrate_database, HotColdDBError}; +use store::hot_cold_store::{HotColdDBError, migrate_database}; use store::{Error, ItemStore, Split, StoreOp}; pub use store::{HotColdDB, MemoryStore}; use tracing::{debug, error, info, warn}; @@ -223,15 +223,14 @@ impl, Cold: ItemStore> BackgroundMigrator { // Schedule another reconstruction batch if required and we have access to the // channel for requeueing. 
- if let Some(tx) = opt_tx { - if !db.get_anchor_info().all_historic_states_stored() { - if let Err(e) = tx.send(Notification::Reconstruction) { - error!( - error = ?e, - "Unable to requeue reconstruction notification" - ); - } - } + if let Some(tx) = opt_tx + && !db.get_anchor_info().all_historic_states_stored() + && let Err(e) = tx.send(Notification::Reconstruction) + { + error!( + error = ?e, + "Unable to requeue reconstruction notification" + ); } } Err(e) => { diff --git a/beacon_node/beacon_chain/src/naive_aggregation_pool.rs b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs index 7e23edbae8d..4c4478d17e6 100644 --- a/beacon_node/beacon_chain/src/naive_aggregation_pool.rs +++ b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs @@ -581,9 +581,9 @@ mod tests { use store::BitVector; use tree_hash::TreeHash; use types::{ - test_utils::{generate_deterministic_keypair, test_random_instance}, Attestation, AttestationBase, AttestationElectra, FixedBytesExtended, Fork, Hash256, SyncCommitteeMessage, + test_utils::{generate_deterministic_keypair, test_random_instance}, }; type E = types::MainnetEthSpec; @@ -647,11 +647,11 @@ mod tests { fn unset_attestation_bit(a: &mut Attestation, i: usize) { match a { - Attestation::Base(ref mut att) => att + Attestation::Base(att) => att .aggregation_bits .set(i, false) .expect("should unset aggregation bit"), - Attestation::Electra(ref mut att) => att + Attestation::Electra(att) => att .aggregation_bits .set(i, false) .expect("should unset aggregation bit"), diff --git a/beacon_node/beacon_chain/src/observed_aggregates.rs b/beacon_node/beacon_chain/src/observed_aggregates.rs index 20ed36ace75..f6f62e1b73b 100644 --- a/beacon_node/beacon_chain/src/observed_aggregates.rs +++ b/beacon_node/beacon_chain/src/observed_aggregates.rs @@ -473,7 +473,7 @@ where #[cfg(not(debug_assertions))] mod tests { use super::*; - use types::{test_utils::test_random_instance, AttestationBase, FixedBytesExtended, Hash256}; + use 
types::{AttestationBase, FixedBytesExtended, Hash256, test_utils::test_random_instance}; type E = types::MainnetEthSpec; diff --git a/beacon_node/beacon_chain/src/observed_attesters.rs b/beacon_node/beacon_chain/src/observed_attesters.rs index 5bba8e4d8e3..34d68fe3ac0 100644 --- a/beacon_node/beacon_chain/src/observed_attesters.rs +++ b/beacon_node/beacon_chain/src/observed_attesters.rs @@ -633,18 +633,24 @@ mod tests { let value = Hash256::zero(); // Assert there is no entry. - assert!(store - .observation_for_validator(key, validator_index) - .unwrap() - .is_none()); - assert!(!store - .validator_has_been_observed(key, validator_index) - .unwrap()); + assert!( + store + .observation_for_validator(key, validator_index) + .unwrap() + .is_none() + ); + assert!( + !store + .validator_has_been_observed(key, validator_index) + .unwrap() + ); // Add an entry. - assert!(!store - .observe_validator(key, validator_index, value) - .unwrap()); + assert!( + !store + .observe_validator(key, validator_index, value) + .unwrap() + ); // Assert there is a correct entry. 
assert_eq!( @@ -653,9 +659,11 @@ mod tests { .unwrap(), Some(value) ); - assert!(store - .validator_has_been_observed(key, validator_index) - .unwrap()); + assert!( + store + .validator_has_been_observed(key, validator_index) + .unwrap() + ); let alternate_value = Hash256::from_low_u64_be(1); diff --git a/beacon_node/beacon_chain/src/observed_operations.rs b/beacon_node/beacon_chain/src/observed_operations.rs index 969d03a11b6..49614c5b542 100644 --- a/beacon_node/beacon_chain/src/observed_operations.rs +++ b/beacon_node/beacon_chain/src/observed_operations.rs @@ -1,5 +1,5 @@ use derivative::Derivative; -use smallvec::{smallvec, SmallVec}; +use smallvec::{SmallVec, smallvec}; use state_processing::{SigVerifiedOp, TransformPersist, VerifyOperation, VerifyOperationAt}; use std::collections::HashSet; use std::marker::PhantomData; diff --git a/beacon_node/beacon_chain/src/pre_finalization_cache.rs b/beacon_node/beacon_chain/src/pre_finalization_cache.rs index 5bd45dc59f6..8996d6b8745 100644 --- a/beacon_node/beacon_chain/src/pre_finalization_cache.rs +++ b/beacon_node/beacon_chain/src/pre_finalization_cache.rs @@ -5,8 +5,8 @@ use parking_lot::Mutex; use std::num::NonZeroUsize; use std::time::Duration; use tracing::debug; -use types::non_zero_usize::new_non_zero_usize; use types::Hash256; +use types::non_zero_usize::new_non_zero_usize; const BLOCK_ROOT_CACHE_LIMIT: NonZeroUsize = new_non_zero_usize(512); const LOOKUP_LIMIT: NonZeroUsize = new_non_zero_usize(8); diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 15c9498e1c1..5e813624db9 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -7,9 +7,9 @@ mod migration_schema_v27; use crate::beacon_chain::BeaconChainTypes; use std::sync::Arc; -use store::hot_cold_store::{HotColdDB, HotColdDBError}; -use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION}; use store::Error as StoreError; +use 
store::hot_cold_store::{HotColdDB, HotColdDBError}; +use store::metadata::{CURRENT_SCHEMA_VERSION, SchemaVersion}; /// Migrate the database from one schema version to another, applying all requisite mutations. pub fn migrate_schema( diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs index d70f41bb7eb..bc832c3399b 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs @@ -1,8 +1,8 @@ +use crate::BeaconForkChoiceStore; use crate::beacon_chain::BeaconChainTypes; use crate::persisted_fork_choice::PersistedForkChoice; use crate::schema_change::StoreError; -use crate::test_utils::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY, FORK_CHOICE_DB_KEY}; -use crate::BeaconForkChoiceStore; +use crate::test_utils::{BEACON_CHAIN_DB_KEY, FORK_CHOICE_DB_KEY, PersistedBeaconChain}; use fork_choice::{ForkChoice, ResetPayloadStatuses}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v24.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v24.rs index 6901c99ceec..1e1823a8364 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v24.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v24.rs @@ -9,13 +9,13 @@ use std::{ time::{Duration, Instant}, }; use store::{ + DBColumn, Error, HotColdDB, HotStateSummary, KeyValueStore, KeyValueStoreOp, StoreItem, hdiff::StorageStrategy, hot_cold_store::{HotStateSummaryV22, OptionalDiffBaseState}, - DBColumn, Error, HotColdDB, HotStateSummary, KeyValueStore, KeyValueStoreOp, StoreItem, }; use tracing::{debug, info, warn}; use types::{ - BeaconState, ChainSpec, Checkpoint, CommitteeCache, EthSpec, Hash256, Slot, CACHED_EPOCHS, + BeaconState, CACHED_EPOCHS, ChainSpec, Checkpoint, CommitteeCache, EthSpec, Hash256, Slot, }; /// We 
stopped using the pruning checkpoint in schema v23 but never explicitly deleted it. diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v26.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v26.rs index 2e2a6bdc4f1..661d015942e 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v26.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v26.rs @@ -1,6 +1,6 @@ -use crate::persisted_custody::{PersistedCustody, CUSTODY_DB_KEY}; -use crate::validator_custody::CustodyContextSsz; use crate::BeaconChainTypes; +use crate::persisted_custody::{CUSTODY_DB_KEY, PersistedCustody}; +use crate::validator_custody::CustodyContextSsz; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use std::sync::Arc; diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v27.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v27.rs index 6275b1c5bea..fbe865ee27e 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v27.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v27.rs @@ -1,6 +1,6 @@ use crate::BeaconChainTypes; use std::sync::Arc; -use store::{metadata::SchemaVersion, Error, HotColdDB}; +use store::{Error, HotColdDB, metadata::SchemaVersion}; /// Add `DataColumnCustodyInfo` entry to v27. 
pub fn upgrade_to_v27( diff --git a/beacon_node/beacon_chain/src/shuffling_cache.rs b/beacon_node/beacon_chain/src/shuffling_cache.rs index 1aa23c28fcf..22921147a68 100644 --- a/beacon_node/beacon_chain/src/shuffling_cache.rs +++ b/beacon_node/beacon_chain/src/shuffling_cache.rs @@ -2,14 +2,14 @@ use std::collections::HashMap; use std::sync::Arc; use itertools::Itertools; -use oneshot_broadcast::{oneshot, Receiver, Sender}; +use oneshot_broadcast::{Receiver, Sender, oneshot}; use tracing::debug; use types::{ - beacon_state::CommitteeCache, AttestationShufflingId, BeaconState, Epoch, EthSpec, Hash256, - RelativeEpoch, + AttestationShufflingId, BeaconState, Epoch, EthSpec, Hash256, RelativeEpoch, + beacon_state::CommitteeCache, }; -use crate::{metrics, BeaconChainError}; +use crate::{BeaconChainError, metrics}; /// The size of the cache that stores committee caches for quicker verification. /// diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index e5472fcf52f..27c2c7c0a11 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -15,17 +15,17 @@ //! 2. There's a possibility that the head block is never built upon, causing wasted CPU cycles. 
use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; use crate::{ - chain_config::FORK_CHOICE_LOOKAHEAD_FACTOR, BeaconChain, BeaconChainError, BeaconChainTypes, + BeaconChain, BeaconChainError, BeaconChainTypes, chain_config::FORK_CHOICE_LOOKAHEAD_FACTOR, }; use slot_clock::SlotClock; use state_processing::per_slot_processing; use std::sync::{ - atomic::{AtomicBool, Ordering}, Arc, + atomic::{AtomicBool, Ordering}, }; use task_executor::TaskExecutor; -use tokio::time::{sleep, sleep_until, Instant}; -use tracing::{debug, debug_span, error, instrument, warn, Instrument}; +use tokio::time::{Instant, sleep, sleep_until}; +use tracing::{Instrument, debug, debug_span, error, instrument, warn}; use types::{AttestationShufflingId, BeaconStateError, EthSpec, Hash256, RelativeEpoch, Slot}; /// If the head slot is more than `MAX_ADVANCE_DISTANCE` from the current slot, then don't perform @@ -241,14 +241,14 @@ async fn state_advance_timer( beacon_chain.task_executor.clone().spawn_blocking( move || { // Signal block proposal for the next slot (if it happens to be waiting). 
- if let Some(tx) = &beacon_chain.fork_choice_signal_tx { - if let Err(e) = tx.notify_fork_choice_complete(next_slot) { - warn!( - error = ?e, - slot = %next_slot, - "Error signalling fork choice waiter" - ); - } + if let Some(tx) = &beacon_chain.fork_choice_signal_tx + && let Err(e) = tx.notify_fork_choice_complete(next_slot) + { + warn!( + error = ?e, + slot = %next_slot, + "Error signalling fork choice waiter" + ); } }, "fork_choice_advance_signal_tx", diff --git a/beacon_node/beacon_chain/src/summaries_dag.rs b/beacon_node/beacon_chain/src/summaries_dag.rs index 42d078baebe..d74bf638efc 100644 --- a/beacon_node/beacon_chain/src/summaries_dag.rs +++ b/beacon_node/beacon_chain/src/summaries_dag.rs @@ -1,7 +1,7 @@ use itertools::Itertools; use std::{ cmp::Ordering, - collections::{btree_map::Entry, BTreeMap, HashMap}, + collections::{BTreeMap, HashMap, btree_map::Entry}, }; use store::HotStateSummary; use types::{Hash256, Slot}; @@ -88,7 +88,7 @@ impl StateSummariesDAG { block_root: summary.latest_block_root, existing_state_summary: (summary.slot, state_root).into(), new_state_summary: (*existing.key(), existing.get().0), - }) + }); } } @@ -136,7 +136,7 @@ impl StateSummariesDAG { block_root: summary.latest_block_root, existing_state_summary: (summary.slot, *state_root).into(), new_state_summary: (*existing.key(), *existing.get().0), - }) + }); } } } @@ -288,7 +288,7 @@ impl StateSummariesDAG { ancestor_slot, state_root, state_slot: summary.slot, - }) + }); } Ordering::Equal => { return Ok(state_root); @@ -322,15 +322,15 @@ impl StateSummariesDAG { loop { if let Some(summary) = self.state_summaries_by_state_root.get(&state_root) { // Detect cycles, including the case where `previous_state_root == state_root`. 
- if let Some(last_slot) = last_slot { - if summary.slot >= last_slot { - return Err(Error::CircularAncestorChain { - state_root, - previous_state_root: summary.previous_state_root, - slot: summary.slot, - last_slot, - }); - } + if let Some(last_slot) = last_slot + && summary.slot >= last_slot + { + return Err(Error::CircularAncestorChain { + state_root, + previous_state_root: summary.previous_state_root, + slot: summary.slot, + last_slot, + }); } ancestors.push((state_root, summary.slot)); diff --git a/beacon_node/beacon_chain/src/sync_committee_verification.rs b/beacon_node/beacon_chain/src/sync_committee_verification.rs index 768c971f94d..f8041769210 100644 --- a/beacon_node/beacon_chain/src/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/src/sync_committee_verification.rs @@ -28,9 +28,9 @@ use crate::observed_attesters::SlotSubcommitteeIndex; use crate::{ - metrics, observed_aggregates::ObserveOutcome, BeaconChain, BeaconChainError, BeaconChainTypes, + BeaconChain, BeaconChainError, BeaconChainTypes, metrics, observed_aggregates::ObserveOutcome, }; -use bls::{verify_signature_sets, PublicKeyBytes}; +use bls::{PublicKeyBytes, verify_signature_sets}; use derivative::Derivative; use safe_arith::ArithError; use slot_clock::SlotClock; @@ -46,14 +46,14 @@ use std::collections::HashMap; use strum::AsRefStr; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use types::ChainSpec; use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use types::slot_data::SlotData; use types::sync_committee::Error as SyncCommitteeError; -use types::ChainSpec; use types::{ - sync_committee_contribution::Error as ContributionError, AggregateSignature, BeaconStateError, - EthSpec, Hash256, SignedContributionAndProof, Slot, SyncCommitteeContribution, - SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, + AggregateSignature, BeaconStateError, EthSpec, Hash256, SignedContributionAndProof, Slot, + SyncCommitteeContribution, SyncCommitteeMessage, SyncSelectionProof, 
SyncSubnetId, + sync_committee_contribution::Error as ContributionError, }; /// Returned when a sync committee contribution was not successfully verified. It might not have been verified for @@ -505,15 +505,14 @@ impl VerifiedSyncCommitteeMessage { validator_index as usize, ) .map_err(BeaconChainError::from)? + && !should_override_prev(&prev_root, &new_root) { - if !should_override_prev(&prev_root, &new_root) { - return Err(Error::PriorSyncCommitteeMessageKnown { - validator_index, - slot: sync_message.slot, - prev_root, - new_root, - }); - } + return Err(Error::PriorSyncCommitteeMessageKnown { + validator_index, + slot: sync_message.slot, + prev_root, + new_root, + }); } // The aggregate signature of the sync committee message is valid. diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 62e20cba777..276f9a3fb0d 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -4,33 +4,33 @@ use crate::data_column_verification::CustodyDataColumn; use crate::kzg_utils::build_data_column_sidecars; use crate::observed_operations::ObservationOutcome; pub use crate::persisted_beacon_chain::PersistedBeaconChain; +use crate::{BeaconBlockResponseWrapper, get_block_root}; +use crate::{ + BeaconChain, BeaconChainTypes, BlockError, ChainConfig, ServerSentEventHandler, + StateSkipConfig, + builder::{BeaconChainBuilder, Witness}, +}; pub use crate::{ + BeaconChainError, NotifyExecutionLayer, ProduceBlockVerification, beacon_chain::{BEACON_CHAIN_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY}, migrate::MigratorConfig, single_attestation::single_attestation_to_attestation, sync_committee_verification::Error as SyncCommitteeError, validator_monitor::{ValidatorMonitor, ValidatorMonitorConfig}, - BeaconChainError, NotifyExecutionLayer, ProduceBlockVerification, }; -use crate::{ - builder::{BeaconChainBuilder, Witness}, - BeaconChain, BeaconChainTypes, BlockError, ChainConfig, 
ServerSentEventHandler, - StateSkipConfig, -}; -use crate::{get_block_root, BeaconBlockResponseWrapper}; use bls::get_withdrawal_credentials; use eth2::types::SignedBlockContentsTuple; use execution_layer::test_utils::generate_genesis_header; use execution_layer::{ + ExecutionLayer, auth::JwtKey, test_utils::{ - ExecutionBlockGenerator, MockBuilder, MockExecutionLayer, DEFAULT_JWT_SECRET, - DEFAULT_TERMINAL_BLOCK, + DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK, ExecutionBlockGenerator, MockBuilder, + MockExecutionLayer, }, - ExecutionLayer, }; use futures::channel::mpsc::Receiver; -pub use genesis::{InteropGenesisBuilder, DEFAULT_ETH1_BLOCK_HASH}; +pub use genesis::{DEFAULT_ETH1_BLOCK_HASH, InteropGenesisBuilder}; use int_to_bytes::int_to_bytes32; use kzg::trusted_setup::get_trusted_setup; use kzg::{Kzg, TrustedSetup}; @@ -38,9 +38,9 @@ use logging::create_test_tracing_subscriber; use merkle_proof::MerkleTree; use operation_pool::ReceivedPreCapella; use parking_lot::{Mutex, RwLockWriteGuard}; -use rand::rngs::StdRng; use rand::Rng; use rand::SeedableRng; +use rand::rngs::StdRng; use rayon::prelude::*; use sensitive_url::SensitiveUrl; use slot_clock::{SlotClock, TestingSlotClock}; @@ -54,14 +54,14 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, LazyLock}; use std::time::Duration; use store::database::interface::BeaconNodeBackend; -use store::{config::StoreConfig, HotColdDB, ItemStore, MemoryStore}; +use store::{HotColdDB, ItemStore, MemoryStore, config::StoreConfig}; use task_executor::TaskExecutor; -use task_executor::{test_utils::TestRuntime, ShutdownReason}; +use task_executor::{ShutdownReason, test_utils::TestRuntime}; use tree_hash::TreeHash; use types::indexed_attestation::IndexedAttestationBase; use types::payload::BlockProductionVersion; -pub use types::test_utils::generate_deterministic_keypairs; use types::test_utils::TestRandom; +pub use types::test_utils::generate_deterministic_keypairs; use types::{typenum::U4294967296, *}; // 4th 
September 2019 @@ -722,7 +722,7 @@ where pub fn set_mock_builder( &mut self, beacon_url: SensitiveUrl, - ) -> impl futures::Future { + ) -> impl futures::Future + 'static { let mock_el = self .mock_execution_layer .as_ref() @@ -894,7 +894,9 @@ where let fork_choice = self.chain.canonical_head.fork_choice_read_lock(); if heads.is_empty() { let nodes = &fork_choice.proto_array().core_proto_array().nodes; - panic!("Expected to know head block root {head_block_root:?}, but heads is empty. Nodes: {nodes:#?}"); + panic!( + "Expected to know head block root {head_block_root:?}, but heads is empty. Nodes: {nodes:#?}" + ); } else { panic!( "Expected to know head block root {head_block_root:?}, known heads {heads:#?}" @@ -931,7 +933,7 @@ where // If we produce two blocks for the same slot, they hash up to the same value and // BeaconChain errors out with `DuplicateFullyImported`. Vary the graffiti so that we produce // different blocks each time. - let graffiti = Graffiti::from(self.rng.lock().gen::<[u8; 32]>()); + let graffiti = Graffiti::from(self.rng.lock().random::<[u8; 32]>()); let randao_reveal = self.sign_randao_reveal(&state, proposer_index, slot); @@ -990,7 +992,7 @@ where // If we produce two blocks for the same slot, they hash up to the same value and // BeaconChain errors out with `DuplicateFullyImported`. Vary the graffiti so that we produce // different blocks each time. - let graffiti = Graffiti::from(self.rng.lock().gen::<[u8; 32]>()); + let graffiti = Graffiti::from(self.rng.lock().random::<[u8; 32]>()); let randao_reveal = self.sign_randao_reveal(&state, proposer_index, slot); @@ -3206,7 +3208,7 @@ pub fn generate_rand_block_and_blobs( // Get either zero blobs or a random number of blobs between 1 and Max Blobs. 
let payload: &mut FullPayloadDeneb = &mut message.body.execution_payload; let num_blobs = match num_blobs { - NumBlobs::Random => rng.gen_range(1..=max_blobs), + NumBlobs::Random => rng.random_range(1..=max_blobs), NumBlobs::Number(n) => n, NumBlobs::None => 0, }; @@ -3226,7 +3228,7 @@ pub fn generate_rand_block_and_blobs( // Get either zero blobs or a random number of blobs between 1 and Max Blobs. let payload: &mut FullPayloadElectra = &mut message.body.execution_payload; let num_blobs = match num_blobs { - NumBlobs::Random => rng.gen_range(1..=max_blobs), + NumBlobs::Random => rng.random_range(1..=max_blobs), NumBlobs::Number(n) => n, NumBlobs::None => 0, }; @@ -3245,7 +3247,7 @@ pub fn generate_rand_block_and_blobs( // Get either zero blobs or a random number of blobs between 1 and Max Blobs. let payload: &mut FullPayloadFulu = &mut message.body.execution_payload; let num_blobs = match num_blobs { - NumBlobs::Random => rng.gen_range(1..=max_blobs), + NumBlobs::Random => rng.random_range(1..=max_blobs), NumBlobs::Number(n) => n, NumBlobs::None => 0, }; diff --git a/beacon_node/beacon_chain/src/validator_custody.rs b/beacon_node/beacon_chain/src/validator_custody.rs index 4e189b5badf..3531fc33c11 100644 --- a/beacon_node/beacon_chain/src/validator_custody.rs +++ b/beacon_node/beacon_chain/src/validator_custody.rs @@ -6,7 +6,7 @@ use std::{ collections::{BTreeMap, HashMap}, sync::atomic::{AtomicU64, Ordering}, }; -use types::data_column_custody_group::{compute_columns_for_custody_group, CustodyIndex}; +use types::data_column_custody_group::{CustodyIndex, compute_columns_for_custody_group}; use types::{ChainSpec, ColumnIndex, Epoch, EthSpec, Slot}; /// A delay before making the CGC change effective to the data availability checker. @@ -283,15 +283,14 @@ impl CustodyContext { /// /// See also: [`Self::num_of_custody_groups_to_sample`]. 
fn custody_group_count_at_epoch(&self, epoch: Epoch, spec: &ChainSpec) -> u64 { - let custody_group_count = if self.current_is_supernode { + if self.current_is_supernode { spec.number_of_custody_groups } else { self.validator_registrations .read() .custody_requirement_at_epoch(epoch) .unwrap_or(spec.custody_requirement) - }; - custody_group_count + } } /// Returns the count of custody groups this node must _sample_ for a block at `epoch` to import. @@ -360,8 +359,8 @@ impl From<&CustodyContext> for CustodyContextSsz { #[cfg(test)] mod tests { + use rand::rng; use rand::seq::SliceRandom; - use rand::thread_rng; use types::MainnetEthSpec; use super::*; @@ -627,7 +626,7 @@ mod tests { // initialise ordered columns let mut all_custody_groups_ordered = (0..spec.number_of_custody_groups).collect::>(); - all_custody_groups_ordered.shuffle(&mut thread_rng()); + all_custody_groups_ordered.shuffle(&mut rng()); custody_context .init_ordered_data_columns_from_custody_groups( diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index 80a6f64582f..23f1a7d4308 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -12,7 +12,7 @@ use slot_clock::SlotClock; use smallvec::SmallVec; use state_processing::common::get_attestation_participation_flag_indices; use state_processing::per_epoch_processing::{ - errors::EpochProcessingError, EpochProcessingSummary, + EpochProcessingSummary, errors::EpochProcessingError, }; use std::collections::{HashMap, HashSet}; use std::io; @@ -163,7 +163,7 @@ impl EpochSummary { /// - It is `None`. /// - `new` is greater than its current value. 
fn update_if_lt(current: &mut Option, new: T) { - if let Some(ref mut current) = current { + if let Some(current) = current { if new < *current { *current = new } @@ -460,11 +460,12 @@ impl ValidatorMonitor { let unaggregated_attestations = &mut self.unaggregated_attestations; // Pruning, this removes the oldest key/pair of the hashmap if it's greater than MAX_UNAGGREGATED_ATTESTATION_HASHMAP_LENGTH - if unaggregated_attestations.len() >= MAX_UNAGGREGATED_ATTESTATION_HASHMAP_LENGTH { - if let Some(oldest_slot) = unaggregated_attestations.keys().min().copied() { - unaggregated_attestations.remove(&oldest_slot); - } + if unaggregated_attestations.len() >= MAX_UNAGGREGATED_ATTESTATION_HASHMAP_LENGTH + && let Some(oldest_slot) = unaggregated_attestations.keys().min().copied() + { + unaggregated_attestations.remove(&oldest_slot); } + let slot = attestation.data().slot; self.unaggregated_attestations.insert(slot, attestation); } @@ -1095,19 +1096,19 @@ impl ValidatorMonitor { return; } - if let Some(pubkey) = self.indices.get(&validator_index) { - if !self.validators.contains_key(pubkey) { - info!( - %pubkey, - validator = %validator_index, - "Started monitoring validator" - ); + if let Some(pubkey) = self.indices.get(&validator_index) + && !self.validators.contains_key(pubkey) + { + info!( + %pubkey, + validator = %validator_index, + "Started monitoring validator" + ); - self.validators.insert( - *pubkey, - MonitoredValidator::new(*pubkey, Some(validator_index)), - ); - } + self.validators.insert( + *pubkey, + MonitoredValidator::new(*pubkey, Some(validator_index)), + ); } } diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index d89a8530e1b..0acb23d5126 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -3,7 +3,7 @@ use beacon_chain::attestation_simulator::produce_unaggregated_attestation; use 
beacon_chain::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy}; use beacon_chain::validator_monitor::UNAGGREGATED_ATTESTATION_LAG_SLOTS; -use beacon_chain::{metrics, StateSkipConfig, WhenSlotSkipped}; +use beacon_chain::{StateSkipConfig, WhenSlotSkipped, metrics}; use std::sync::{Arc, LazyLock}; use tree_hash::TreeHash; use types::{ diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 11729f8d8a8..706ffad3c1a 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -1,29 +1,29 @@ #![cfg(not(debug_assertions))] use beacon_chain::attestation_verification::{ - batch_verify_aggregated_attestations, batch_verify_unaggregated_attestations, Error, + Error, batch_verify_aggregated_attestations, batch_verify_unaggregated_attestations, }; use beacon_chain::observed_aggregates::ObservedAttestationKey; -use beacon_chain::test_utils::{MakeAttestationOptions, HARNESS_GENESIS_TIME}; +use beacon_chain::test_utils::{HARNESS_GENESIS_TIME, MakeAttestationOptions}; use beacon_chain::{ + BeaconChain, BeaconChainError, BeaconChainTypes, ChainConfig, WhenSlotSkipped, attestation_verification::Error as AttnError, test_utils::{ - single_attestation_to_attestation, test_spec, AttestationStrategy, BeaconChainHarness, - BlockStrategy, EphemeralHarnessType, + AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, + single_attestation_to_attestation, test_spec, }, - BeaconChain, BeaconChainError, BeaconChainTypes, ChainConfig, WhenSlotSkipped, }; -use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; +use genesis::{DEFAULT_ETH1_BLOCK_HASH, interop_genesis_state}; use int_to_bytes::int_to_bytes32; use state_processing::per_slot_processing; use std::sync::{Arc, LazyLock}; use tree_hash::TreeHash; use types::{ + Address, AggregateSignature, Attestation, AttestationRef, ChainSpec, 
Epoch, EthSpec, + FixedBytesExtended, ForkName, Hash256, Keypair, MainnetEthSpec, SecretKey, SelectionProof, + SignedAggregateAndProof, SingleAttestation, Slot, SubnetId, Unsigned, signed_aggregate_and_proof::SignedAggregateAndProofRefMut, - test_utils::generate_deterministic_keypair, Address, AggregateSignature, Attestation, - AttestationRef, ChainSpec, Epoch, EthSpec, FixedBytesExtended, ForkName, Hash256, Keypair, - MainnetEthSpec, SecretKey, SelectionProof, SignedAggregateAndProof, SingleAttestation, Slot, - SubnetId, Unsigned, + test_utils::generate_deterministic_keypair, }; pub type E = MainnetEthSpec; @@ -1347,10 +1347,12 @@ async fn attestation_to_finalized_block() { ); // Pre-finalization block cache should contain the block root. - assert!(harness - .chain - .pre_finalization_block_cache - .contains(earlier_block_root)); + assert!( + harness + .chain + .pre_finalization_block_cache + .contains(earlier_block_root) + ); } #[tokio::test] @@ -1407,24 +1409,30 @@ async fn verify_aggregate_for_gossip_doppelganger_detection() { assert!(harness.chain.validator_seen_at_epoch(index, epoch)); // Check the correct beacon cache is populated - assert!(!harness - .chain - .observed_block_attesters - .read() - .validator_has_been_observed(epoch, index) - .expect("should check if block attester was observed")); - assert!(!harness - .chain - .observed_gossip_attesters - .read() - .validator_has_been_observed(epoch, index) - .expect("should check if gossip attester was observed")); - assert!(harness - .chain - .observed_aggregators - .read() - .validator_has_been_observed(epoch, index) - .expect("should check if gossip aggregator was observed")); + assert!( + !harness + .chain + .observed_block_attesters + .read() + .validator_has_been_observed(epoch, index) + .expect("should check if block attester was observed") + ); + assert!( + !harness + .chain + .observed_gossip_attesters + .read() + .validator_has_been_observed(epoch, index) + .expect("should check if gossip attester 
was observed") + ); + assert!( + harness + .chain + .observed_aggregators + .read() + .validator_has_been_observed(epoch, index) + .expect("should check if gossip aggregator was observed") + ); } #[tokio::test] @@ -1464,24 +1472,30 @@ async fn verify_attestation_for_gossip_doppelganger_detection() { assert!(harness.chain.validator_seen_at_epoch(index, epoch)); // Check the correct beacon cache is populated - assert!(!harness - .chain - .observed_block_attesters - .read() - .validator_has_been_observed(epoch, index) - .expect("should check if block attester was observed")); - assert!(harness - .chain - .observed_gossip_attesters - .read() - .validator_has_been_observed(epoch, index) - .expect("should check if gossip attester was observed")); - assert!(!harness - .chain - .observed_aggregators - .read() - .validator_has_been_observed(epoch, index) - .expect("should check if gossip aggregator was observed")); + assert!( + !harness + .chain + .observed_block_attesters + .read() + .validator_has_been_observed(epoch, index) + .expect("should check if block attester was observed") + ); + assert!( + harness + .chain + .observed_gossip_attesters + .read() + .validator_has_been_observed(epoch, index) + .expect("should check if gossip attester was observed") + ); + assert!( + !harness + .chain + .observed_aggregators + .read() + .validator_has_been_observed(epoch, index) + .expect("should check if gossip aggregator was observed") + ); } #[tokio::test] @@ -1544,7 +1558,8 @@ async fn attestation_verification_use_head_state_fork() { .map(|(attestation, subnet_id)| (attestation, Some(*subnet_id))); assert!( - batch_verify_unaggregated_attestations(attestations_and_subnets, &harness.chain).is_ok(), + batch_verify_unaggregated_attestations(attestations_and_subnets, &harness.chain) + .is_ok(), "should accept attestations with `data.slot` >= first capella slot signed using the Capella fork" ); } diff --git a/beacon_node/beacon_chain/tests/bellatrix.rs 
b/beacon_node/beacon_chain/tests/bellatrix.rs index 3a424e73bab..5d466dd1d38 100644 --- a/beacon_node/beacon_chain/tests/bellatrix.rs +++ b/beacon_node/beacon_chain/tests/bellatrix.rs @@ -1,7 +1,7 @@ #![cfg(not(debug_assertions))] // Tests run too slow in debug. use beacon_chain::test_utils::BeaconChainHarness; -use execution_layer::test_utils::{generate_pow_block, Block, DEFAULT_TERMINAL_BLOCK}; +use execution_layer::test_utils::{Block, DEFAULT_TERMINAL_BLOCK, generate_pow_block}; use types::*; const VALIDATOR_COUNT: usize = 32; diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 92fea70a1dd..2afae10e965 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -3,10 +3,10 @@ use beacon_chain::block_verification_types::{AsBlock, ExecutedBlock, RpcBlock}; use beacon_chain::data_column_verification::CustodyDataColumn; use beacon_chain::{ + AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, ExecutionPendingBlock, test_utils::{ - test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, + AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, test_spec, }, - AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, ExecutionPendingBlock, }; use beacon_chain::{ BeaconSnapshot, BlockError, ChainConfig, ChainSegmentResult, IntoExecutionPendingBlock, @@ -15,9 +15,10 @@ use beacon_chain::{ use logging::create_test_tracing_subscriber; use slasher::{Config as SlasherConfig, Slasher}; use state_processing::{ + BlockProcessingError, ConsensusContext, VerifyBlockRoot, common::{attesting_indices_base, attesting_indices_electra}, - per_block_processing::{per_block_processing, BlockSignatureStrategy}, - per_slot_processing, BlockProcessingError, ConsensusContext, VerifyBlockRoot, + per_block_processing::{BlockSignatureStrategy, per_block_processing}, + per_slot_processing, 
}; use std::marker::PhantomData; use std::sync::{Arc, LazyLock}; @@ -769,37 +770,37 @@ async fn invalid_signature_attester_slashing() { .clone() .deconstruct(); match &mut block.body_mut() { - BeaconBlockBodyRefMut::Base(ref mut blk) => { + BeaconBlockBodyRefMut::Base(blk) => { blk.attester_slashings .push(attester_slashing.as_base().unwrap().clone()) .expect("should update attester slashing"); } - BeaconBlockBodyRefMut::Altair(ref mut blk) => { + BeaconBlockBodyRefMut::Altair(blk) => { blk.attester_slashings .push(attester_slashing.as_base().unwrap().clone()) .expect("should update attester slashing"); } - BeaconBlockBodyRefMut::Bellatrix(ref mut blk) => { + BeaconBlockBodyRefMut::Bellatrix(blk) => { blk.attester_slashings .push(attester_slashing.as_base().unwrap().clone()) .expect("should update attester slashing"); } - BeaconBlockBodyRefMut::Capella(ref mut blk) => { + BeaconBlockBodyRefMut::Capella(blk) => { blk.attester_slashings .push(attester_slashing.as_base().unwrap().clone()) .expect("should update attester slashing"); } - BeaconBlockBodyRefMut::Deneb(ref mut blk) => { + BeaconBlockBodyRefMut::Deneb(blk) => { blk.attester_slashings .push(attester_slashing.as_base().unwrap().clone()) .expect("should update attester slashing"); } - BeaconBlockBodyRefMut::Electra(ref mut blk) => { + BeaconBlockBodyRefMut::Electra(blk) => { blk.attester_slashings .push(attester_slashing.as_electra().unwrap().clone()) .expect("should update attester slashing"); } - BeaconBlockBodyRefMut::Fulu(ref mut blk) => { + BeaconBlockBodyRefMut::Fulu(blk) => { blk.attester_slashings .push(attester_slashing.as_electra().unwrap().clone()) .expect("should update attester slashing"); @@ -835,31 +836,31 @@ async fn invalid_signature_attestation() { .clone() .deconstruct(); match &mut block.body_mut() { - BeaconBlockBodyRefMut::Base(ref mut blk) => blk + BeaconBlockBodyRefMut::Base(blk) => blk .attestations .get_mut(0) .map(|att| att.signature = junk_aggregate_signature()), - 
BeaconBlockBodyRefMut::Altair(ref mut blk) => blk + BeaconBlockBodyRefMut::Altair(blk) => blk .attestations .get_mut(0) .map(|att| att.signature = junk_aggregate_signature()), - BeaconBlockBodyRefMut::Bellatrix(ref mut blk) => blk + BeaconBlockBodyRefMut::Bellatrix(blk) => blk .attestations .get_mut(0) .map(|att| att.signature = junk_aggregate_signature()), - BeaconBlockBodyRefMut::Capella(ref mut blk) => blk + BeaconBlockBodyRefMut::Capella(blk) => blk .attestations .get_mut(0) .map(|att| att.signature = junk_aggregate_signature()), - BeaconBlockBodyRefMut::Deneb(ref mut blk) => blk + BeaconBlockBodyRefMut::Deneb(blk) => blk .attestations .get_mut(0) .map(|att| att.signature = junk_aggregate_signature()), - BeaconBlockBodyRefMut::Electra(ref mut blk) => blk + BeaconBlockBodyRefMut::Electra(blk) => blk .attestations .get_mut(0) .map(|att| att.signature = junk_aggregate_signature()), - BeaconBlockBodyRefMut::Fulu(ref mut blk) => blk + BeaconBlockBodyRefMut::Fulu(blk) => blk .attestations .get_mut(0) .map(|att| att.signature = junk_aggregate_signature()), @@ -1214,7 +1215,12 @@ async fn block_gossip_verification() { // Check to ensure that we registered this is a valid block from this proposer. 
assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await), + unwrap_err( + harness + .chain + .verify_block_for_gossip(Arc::new(block.clone())) + .await + ), BlockError::DuplicateImportStatusUnknown(_), ), "should register any valid signature against the proposer, even if the block failed later verification" @@ -1432,24 +1438,30 @@ async fn verify_block_for_gossip_doppelganger_detection() { assert!(harness.chain.validator_seen_at_epoch(index, epoch)); // Check the correct beacon cache is populated - assert!(harness - .chain - .observed_block_attesters - .read() - .validator_has_been_observed(epoch, index) - .expect("should check if block attester was observed")); - assert!(!harness - .chain - .observed_gossip_attesters - .read() - .validator_has_been_observed(epoch, index) - .expect("should check if gossip attester was observed")); - assert!(!harness - .chain - .observed_aggregators - .read() - .validator_has_been_observed(epoch, index) - .expect("should check if gossip aggregator was observed")); + assert!( + harness + .chain + .observed_block_attesters + .read() + .validator_has_been_observed(epoch, index) + .expect("should check if block attester was observed") + ); + assert!( + !harness + .chain + .observed_gossip_attesters + .read() + .validator_has_been_observed(epoch, index) + .expect("should check if gossip attester was observed") + ); + assert!( + !harness + .chain + .observed_aggregators + .read() + .validator_has_been_observed(epoch, index) + .expect("should check if gossip aggregator was observed") + ); } } } diff --git a/beacon_node/beacon_chain/tests/events.rs b/beacon_node/beacon_chain/tests/events.rs index 74a98b11834..f8b3d33a20b 100644 --- a/beacon_node/beacon_chain/tests/events.rs +++ b/beacon_node/beacon_chain/tests/events.rs @@ -2,8 +2,8 @@ use beacon_chain::blob_verification::GossipVerifiedBlob; use beacon_chain::data_column_verification::GossipVerifiedDataColumn; use 
beacon_chain::test_utils::{BeaconChainHarness, TEST_DATA_COLUMN_SIDECARS_SSZ}; use eth2::types::{EventKind, SseBlobSidecar, SseDataColumnSidecar}; -use rand::rngs::StdRng; use rand::SeedableRng; +use rand::rngs::StdRng; use std::sync::Arc; use types::blob_sidecar::FixedBlobSidecarList; use types::test_utils::TestRandom; diff --git a/beacon_node/beacon_chain/tests/op_verification.rs b/beacon_node/beacon_chain/tests/op_verification.rs index 86ab0cce804..c18af0bde70 100644 --- a/beacon_node/beacon_chain/tests/op_verification.rs +++ b/beacon_node/beacon_chain/tests/op_verification.rs @@ -3,19 +3,19 @@ #![cfg(not(debug_assertions))] use beacon_chain::{ + BeaconChainError, observed_operations::ObservationOutcome, test_utils::{ - test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, + AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, test_spec, }, - BeaconChainError, }; use state_processing::per_block_processing::errors::{ AttesterSlashingInvalid, BlockOperationError, ExitInvalid, ProposerSlashingInvalid, }; use std::sync::{Arc, LazyLock}; -use store::database::interface::BeaconNodeBackend; use store::StoreConfig; -use tempfile::{tempdir, TempDir}; +use store::database::interface::BeaconNodeBackend; +use tempfile::{TempDir, tempdir}; use types::*; pub const VALIDATOR_COUNT: usize = 24; diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 05fae7aa70f..5bd43835e33 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -2,15 +2,15 @@ use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::{ + BeaconChainError, BlockError, ChainConfig, ExecutionPayloadError, + INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, NotifyExecutionLayer, OverrideForkchoiceUpdate, + StateSkipConfig, WhenSlotSkipped, canonical_head::{CachedHead, CanonicalHead}, 
test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BeaconChainError, BlockError, ChainConfig, ExecutionPayloadError, NotifyExecutionLayer, - OverrideForkchoiceUpdate, StateSkipConfig, WhenSlotSkipped, - INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; use execution_layer::{ - json_structures::{JsonForkchoiceStateV1, JsonPayloadAttributes, JsonPayloadAttributesV1}, ExecutionLayer, ForkchoiceState, PayloadAttributes, + json_structures::{JsonForkchoiceStateV1, JsonPayloadAttributes, JsonPayloadAttributesV1}, }; use fork_choice::{Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus}; use proto_array::{Error as ProtoArrayError, ExecutionStatus}; @@ -822,9 +822,10 @@ async fn switches_heads() { assert_eq!(rig.harness.head_block_root(), fork_parent_root); // The fork block has not yet been validated. - assert!(rig - .execution_status(fork_block_root) - .is_optimistic_or_invalid()); + assert!( + rig.execution_status(fork_block_root) + .is_optimistic_or_invalid() + ); for root in blocks { let slot = rig @@ -872,12 +873,13 @@ async fn invalid_during_processing() { ]; // 0 should be present in the chain. - assert!(rig - .harness - .chain - .get_blinded_block(&roots[0]) - .unwrap() - .is_some()); + assert!( + rig.harness + .chain + .get_blinded_block(&roots[0]) + .unwrap() + .is_some() + ); // 1 should *not* be present in the chain. assert_eq!( rig.harness.chain.get_blinded_block(&roots[1]).unwrap(), @@ -1193,10 +1195,10 @@ async fn attesting_to_optimistic_head() { .unwrap(); match &mut attestation { - Attestation::Base(ref mut att) => { + Attestation::Base(att) => { att.aggregation_bits.set(0, true).unwrap(); } - Attestation::Electra(ref mut att) => { + Attestation::Electra(att) => { att.aggregation_bits.set(0, true).unwrap(); } } @@ -1354,11 +1356,12 @@ impl InvalidHeadSetup { // head block as invalid should not result in another head being chosen. // Rather, it should fail to run fork choice and leave the invalid block as // the head. 
- assert!(rig - .canonical_head() - .head_execution_status() - .unwrap() - .is_invalid()); + assert!( + rig.canonical_head() + .head_execution_status() + .unwrap() + .is_invalid() + ); // Ensure that we're getting the correct error when trying to find a new // head. @@ -1511,7 +1514,12 @@ async fn weights_after_resetting_optimistic_status() { .fork_choice_read_lock() .get_block_weight(&head.head_block_root()) .unwrap(), - head.snapshot.beacon_state.validators().get(0).unwrap().effective_balance, + head.snapshot + .beacon_state + .validators() + .get(0) + .unwrap() + .effective_balance, "proposer boost should be removed from the head block and the vote of a single validator applied" ); diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs index fa2d028f224..0a5881e486b 100644 --- a/beacon_node/beacon_chain/tests/rewards.rs +++ b/beacon_node/beacon_chain/tests/rewards.rs @@ -2,12 +2,12 @@ use beacon_chain::block_verification_types::AsBlock; use beacon_chain::test_utils::{ - generate_deterministic_keypairs, BeaconChainHarness, EphemeralHarnessType, + BeaconChainHarness, EphemeralHarnessType, generate_deterministic_keypairs, }; use beacon_chain::{ + BlockError, ChainConfig, StateSkipConfig, WhenSlotSkipped, test_utils::{AttestationStrategy, BlockStrategy, RelativeSyncCommittee}, types::{Epoch, EthSpec, Keypair, MinimalEthSpec}, - BlockError, ChainConfig, StateSkipConfig, WhenSlotSkipped, }; use eth2::types::{StandardAttestationRewards, TotalAttestationRewards, ValidatorId}; use state_processing::{BlockReplayError, BlockReplayer}; @@ -424,9 +424,11 @@ async fn test_rewards_altair() { .unwrap(); // assert ideal rewards are greater than 0 - assert!(ideal_rewards - .iter() - .all(|reward| reward.head > 0 && reward.target > 0 && reward.source > 0)); + assert!( + ideal_rewards + .iter() + .all(|reward| reward.head > 0 && reward.target > 0 && reward.source > 0) + ); // apply attestation, proposal, and sync committee rewards and 
penalties to initial balances apply_attestation_rewards(&mut expected_balances, total_rewards); @@ -507,12 +509,16 @@ async fn test_rewards_altair_inactivity_leak() { // assert inactivity penalty for both ideal rewards and individual validators assert!(ideal_rewards.iter().all(|reward| reward.inactivity == 0)); - assert!(total_rewards[..half] - .iter() - .all(|reward| reward.inactivity == 0)); - assert!(total_rewards[half..] - .iter() - .all(|reward| reward.inactivity < 0)); + assert!( + total_rewards[..half] + .iter() + .all(|reward| reward.inactivity == 0) + ); + assert!( + total_rewards[half..] + .iter() + .all(|reward| reward.inactivity < 0) + ); // apply attestation, proposal, and sync committee rewards and penalties to initial balances apply_attestation_rewards(&mut expected_balances, total_rewards); @@ -612,9 +618,11 @@ async fn test_rewards_altair_inactivity_leak_justification_epoch() { .unwrap(); // assert ideal rewards are greater than 0 - assert!(ideal_rewards - .iter() - .all(|reward| reward.head > 0 && reward.target > 0 && reward.source > 0)); + assert!( + ideal_rewards + .iter() + .all(|reward| reward.head > 0 && reward.target > 0 && reward.source > 0) + ); // apply attestation, proposal, and sync committee rewards and penalties to initial balances apply_attestation_rewards(&mut expected_balances, total_rewards); @@ -688,9 +696,11 @@ async fn test_rewards_electra() { ideal_rewards.len() as u64, spec.max_effective_balance_electra / spec.effective_balance_increment ); - assert!(ideal_rewards - .iter() - .all(|reward| reward.head > 0 && reward.target > 0 && reward.source > 0)); + assert!( + ideal_rewards + .iter() + .all(|reward| reward.head > 0 && reward.target > 0 && reward.source > 0) + ); // apply attestation, proposal, and sync committee rewards and penalties to initial balances apply_attestation_rewards(&mut expected_balances, total_rewards); @@ -776,9 +786,11 @@ async fn check_all_electra_rewards( harness.spec.max_effective_balance_electra / 
harness.spec.effective_balance_increment ); - assert!(ideal_rewards - .iter() - .all(|reward| reward.head > 0 && reward.target > 0 && reward.source > 0)); + assert!( + ideal_rewards + .iter() + .all(|reward| reward.head > 0 && reward.target > 0 && reward.source > 0) + ); // apply attestation, proposal, and sync committee rewards and penalties to initial balances apply_attestation_rewards(&mut balances, total_rewards); diff --git a/beacon_node/beacon_chain/tests/schema_stability.rs b/beacon_node/beacon_chain/tests/schema_stability.rs index 1d12fc878e7..3b09921c15c 100644 --- a/beacon_node/beacon_chain/tests/schema_stability.rs +++ b/beacon_node/beacon_chain/tests/schema_stability.rs @@ -1,21 +1,21 @@ use beacon_chain::{ + ChainConfig, persisted_beacon_chain::PersistedBeaconChain, persisted_custody::PersistedCustody, - test_utils::{test_spec, BeaconChainHarness, DiskHarnessType}, - ChainConfig, + test_utils::{BeaconChainHarness, DiskHarnessType, test_spec}, }; use logging::create_test_tracing_subscriber; use operation_pool::PersistedOperationPool; use ssz::Encode; use std::sync::{Arc, LazyLock}; use store::{ + DBColumn, HotColdDB, StoreConfig, StoreItem, database::interface::BeaconNodeBackend, hot_cold_store::Split, metadata::{DataColumnCustodyInfo, DataColumnInfo}, - DBColumn, HotColdDB, StoreConfig, StoreItem, }; use strum::IntoEnumIterator; -use tempfile::{tempdir, TempDir}; +use tempfile::{TempDir, tempdir}; use types::{ChainSpec, Hash256, Keypair, MainnetEthSpec, Slot}; type E = MainnetEthSpec; diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 691ec003179..2a2f1d7d059 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -7,20 +7,21 @@ use beacon_chain::data_availability_checker::AvailableBlock; use beacon_chain::schema_change::migrate_schema; use beacon_chain::test_utils::SyncCommitteeStrategy; use beacon_chain::test_utils::{ - get_kzg, 
mock_execution_layer_from_parts, test_spec, AttestationStrategy, BeaconChainHarness, - BlockStrategy, DiskHarnessType, + AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, get_kzg, + mock_execution_layer_from_parts, test_spec, }; use beacon_chain::{ + BeaconChain, BeaconChainError, BeaconChainTypes, BeaconSnapshot, BlockError, ChainConfig, + NotifyExecutionLayer, ServerSentEventHandler, WhenSlotSkipped, data_availability_checker::MaybeAvailableBlock, historical_blocks::HistoricalBlockError, - migrate::MigratorConfig, BeaconChain, BeaconChainError, BeaconChainTypes, BeaconSnapshot, - BlockError, ChainConfig, NotifyExecutionLayer, ServerSentEventHandler, WhenSlotSkipped, + migrate::MigratorConfig, }; use logging::create_test_tracing_subscriber; use maplit::hashset; -use rand::rngs::StdRng; use rand::Rng; +use rand::rngs::StdRng; use slot_clock::{SlotClock, TestingSlotClock}; -use state_processing::{state_advance::complete_state_advance, BlockReplayer}; +use state_processing::{BlockReplayer, state_advance::complete_state_advance}; use std::collections::HashMap; use std::collections::HashSet; use std::convert::TryInto; @@ -28,13 +29,13 @@ use std::str::FromStr; use std::sync::{Arc, LazyLock}; use std::time::Duration; use store::database::interface::BeaconNodeBackend; -use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION, STATE_UPPER_LIMIT_NO_RETAIN}; +use store::metadata::{CURRENT_SCHEMA_VERSION, STATE_UPPER_LIMIT_NO_RETAIN, SchemaVersion}; use store::{ + BlobInfo, DBColumn, HotColdDB, StoreConfig, hdiff::HierarchyConfig, iter::{BlockRootsIterator, StateRootsIterator}, - BlobInfo, DBColumn, HotColdDB, StoreConfig, }; -use tempfile::{tempdir, TempDir}; +use tempfile::{TempDir, tempdir}; use tracing::info; use types::test_utils::{SeedableRng, XorShiftRng}; use types::*; @@ -297,7 +298,7 @@ async fn randomised_skips() { let mut head_slot = 0; for slot in 1..=num_slots { - if rng.gen_bool(0.8) { + if rng.random_bool(0.8) { harness 
.extend_chain( 1, @@ -410,13 +411,15 @@ async fn randao_genesis_storage() { .await; // Check that genesis value is still present - assert!(harness - .chain - .head_snapshot() - .beacon_state - .randao_mixes() - .iter() - .any(|x| *x == genesis_value)); + assert!( + harness + .chain + .head_snapshot() + .beacon_state + .randao_mixes() + .iter() + .any(|x| *x == genesis_value) + ); // Then upon adding one more block, it isn't harness.advance_slot(); @@ -427,13 +430,15 @@ async fn randao_genesis_storage() { AttestationStrategy::AllValidators, ) .await; - assert!(!harness - .chain - .head_snapshot() - .beacon_state - .randao_mixes() - .iter() - .any(|x| *x == genesis_value)); + assert!( + !harness + .chain + .head_snapshot() + .beacon_state + .randao_mixes() + .iter() + .any(|x| *x == genesis_value) + ); check_finalization(&harness, num_slots); check_split_slot(&harness, store); @@ -2660,10 +2665,12 @@ async fn weak_subjectivity_sync_test( // Prune_payloads is set to false in the default config, so the payload should exist if block.message().execution_payload().is_ok() { - assert!(beacon_chain - .store - .execution_payload_exists(&block_root) - .unwrap(),); + assert!( + beacon_chain + .store + .execution_payload_exists(&block_root) + .unwrap(), + ); } prev_block_root = block_root; @@ -3622,10 +3629,12 @@ async fn prune_historic_states() { .map(Result::unwrap) .collect::>(); for &(state_root, slot) in &first_epoch_state_roots { - assert!(store - .get_state(&state_root, Some(slot), CACHE_STATE_IN_TESTS) - .unwrap() - .is_some()); + assert!( + store + .get_state(&state_root, Some(slot), CACHE_STATE_IN_TESTS) + .unwrap() + .is_some() + ); } store @@ -3794,11 +3803,13 @@ async fn replay_from_split_state() { let anchor_slot = store.get_anchor_info().anchor_slot; assert_eq!(split.slot, 3 * E::slots_per_epoch()); assert_eq!(anchor_slot, 0); - assert!(store - .hierarchy - .storage_strategy(split.slot, anchor_slot) - .unwrap() - .is_replay_from()); + assert!( + store + 
.hierarchy + .storage_strategy(split.slot, anchor_slot) + .unwrap() + .is_replay_from() + ); // Close the database and reopen it. drop(store); diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs b/beacon_node/beacon_chain/tests/sync_committee_verification.rs index c8bbcce20d3..9dd12410fbb 100644 --- a/beacon_node/beacon_chain/tests/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -5,7 +5,7 @@ use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType, Relativ use int_to_bytes::int_to_bytes32; use safe_arith::SafeArith; use state_processing::{ - per_block_processing::{altair::sync_committee::process_sync_aggregate, VerifySignatures}, + per_block_processing::{VerifySignatures, altair::sync_committee::process_sync_aggregate}, state_advance::complete_state_advance, }; use std::sync::LazyLock; diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 55ef3dc2794..ec0e607d00a 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -1,12 +1,12 @@ #![cfg(not(debug_assertions))] use beacon_chain::{ + BeaconChain, ChainConfig, NotifyExecutionLayer, StateSkipConfig, WhenSlotSkipped, attestation_verification::Error as AttnError, test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, OP_POOL_DB_KEY, }, - BeaconChain, ChainConfig, NotifyExecutionLayer, StateSkipConfig, WhenSlotSkipped, }; use operation_pool::PersistedOperationPool; use state_processing::EpochProcessingError; @@ -1035,11 +1035,13 @@ async fn pseudo_finalize_test_generic( // This is a regression test for https://github.com/sigp/lighthouse/pull/7105 if !expect_true_finalization_migration { assert_eq!(expected_split_slot, pseudo_finalized_slot); - assert!(!harness - .chain - .canonical_head - .fork_choice_read_lock() - .contains_block(&split.block_root)); + assert!( + !harness + .chain + .canonical_head + 
.fork_choice_read_lock() + .contains_block(&split.block_root) + ); } } diff --git a/beacon_node/beacon_chain/tests/validator_monitor.rs b/beacon_node/beacon_chain/tests/validator_monitor.rs index 5b861d1a4a4..4e2554d3d8d 100644 --- a/beacon_node/beacon_chain/tests/validator_monitor.rs +++ b/beacon_node/beacon_chain/tests/validator_monitor.rs @@ -1,7 +1,7 @@ use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; -use beacon_chain::validator_monitor::{ValidatorMonitorConfig, MISSED_BLOCK_LAG_SLOTS}; +use beacon_chain::validator_monitor::{MISSED_BLOCK_LAG_SLOTS, ValidatorMonitorConfig}; use std::sync::LazyLock; use types::{Epoch, EthSpec, Keypair, MainnetEthSpec, PublicKeyBytes, Slot}; diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 14460a93fc2..92a3ad464e6 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -44,8 +44,8 @@ use crate::work_reprocessing_queue::{ use futures::stream::{Stream, StreamExt}; use futures::task::Poll; use lighthouse_network::{MessageId, NetworkGlobals, PeerId}; -use logging::crit; use logging::TimeLatch; +use logging::crit; use parking_lot::Mutex; pub use scheduler::work_reprocessing_queue; use serde::{Deserialize, Serialize}; @@ -69,8 +69,8 @@ use types::{ }; use work_reprocessing_queue::IgnoredRpcBlock; use work_reprocessing_queue::{ - spawn_reprocess_scheduler, QueuedAggregate, QueuedLightClientUpdate, QueuedRpcBlock, - QueuedUnaggregate, ReadyWork, + QueuedAggregate, QueuedLightClientUpdate, QueuedRpcBlock, QueuedUnaggregate, ReadyWork, + spawn_reprocess_scheduler, }; mod metrics; diff --git a/beacon_node/beacon_processor/src/metrics.rs b/beacon_node/beacon_processor/src/metrics.rs index b1d1d3dda60..2c27b78f630 100644 --- a/beacon_node/beacon_processor/src/metrics.rs +++ b/beacon_node/beacon_processor/src/metrics.rs @@ -87,9 +87,9 @@ pub static 
BEACON_PROCESSOR_REPROCESSING_QUEUE_TOTAL: LazyLock> = LazyLock::new(|| { try_create_int_counter( - "beacon_processor_reprocessing_queue_expired_attestations", - "Number of queued attestations which have expired before a matching block has been found." - ) + "beacon_processor_reprocessing_queue_expired_attestations", + "Number of queued attestations which have expired before a matching block has been found.", + ) }); pub static BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_ATTESTATIONS: LazyLock> = LazyLock::new(|| { @@ -107,7 +107,7 @@ pub static BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_OPTIMISTIC_UPDATES: LazyL > = LazyLock::new(|| { try_create_int_counter( "beacon_processor_reprocessing_queue_expired_optimistic_updates", - "Number of queued light client optimistic updates which have expired before a matching block has been found." + "Number of queued light client optimistic updates which have expired before a matching block has been found.", ) }); pub static BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_OPTIMISTIC_UPDATES: LazyLock< @@ -115,7 +115,7 @@ pub static BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_OPTIMISTIC_UPDATES: LazyL > = LazyLock::new(|| { try_create_int_counter( "beacon_processor_reprocessing_queue_matched_optimistic_updates", - "Number of queued light client optimistic updates where a matching block has been imported." 
+ "Number of queued light client optimistic updates where a matching block has been imported.", ) }); @@ -132,6 +132,6 @@ pub static BEACON_PROCESSOR_QUEUE_TIME: LazyLock> = LazyLoc try_create_histogram_vec( "beacon_processor_queue_time", "The delay between when a work event was queued in the beacon processor and when it was popped from the queue", - &["work_type"] + &["work_type"], ) }); diff --git a/beacon_node/beacon_processor/src/scheduler/work_reprocessing_queue.rs b/beacon_node/beacon_processor/src/scheduler/work_reprocessing_queue.rs index 07d540050f9..032f14ce3d8 100644 --- a/beacon_node/beacon_processor/src/scheduler/work_reprocessing_queue.rs +++ b/beacon_node/beacon_processor/src/scheduler/work_reprocessing_queue.rs @@ -16,8 +16,8 @@ use fnv::FnvHashMap; use futures::task::Poll; use futures::{Stream, StreamExt}; use itertools::Itertools; -use logging::crit; use logging::TimeLatch; +use logging::crit; use slot_clock::SlotClock; use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; @@ -480,15 +480,14 @@ impl ReprocessQueue { // This logic is slightly awkward since `SlotClock::duration_to_slot` // doesn't distinguish between a slot that has already arrived and an // error reading the slot clock. 
- if let Some(now) = self.slot_clock.now() { - if block_slot <= now - && self - .ready_work_tx - .try_send(ReadyWork::Block(early_block)) - .is_err() - { - error!("Failed to send block"); - } + if let Some(now) = self.slot_clock.now() + && block_slot <= now + && self + .ready_work_tx + .try_send(ReadyWork::Block(early_block)) + .is_err() + { + error!("Failed to send block"); } } } @@ -816,10 +815,10 @@ impl ReprocessQueue { ); } - if let Some(queued_atts) = self.awaiting_attestations_per_root.get_mut(&root) { - if let Some(index) = queued_atts.iter().position(|&id| id == queued_id) { - queued_atts.swap_remove(index); - } + if let Some(queued_atts) = self.awaiting_attestations_per_root.get_mut(&root) + && let Some(index) = queued_atts.iter().position(|&id| id == queued_id) + { + queued_atts.swap_remove(index); } } } @@ -843,12 +842,10 @@ impl ReprocessQueue { if let Some(queued_lc_updates) = self .awaiting_lc_updates_per_parent_root .get_mut(&parent_root) - { - if let Some(index) = + && let Some(index) = queued_lc_updates.iter().position(|&id| id == queued_id) - { - queued_lc_updates.swap_remove(index); - } + { + queued_lc_updates.swap_remove(index); } } } diff --git a/beacon_node/builder_client/src/lib.rs b/beacon_node/builder_client/src/lib.rs index c1066042854..0c3fdca9077 100644 --- a/beacon_node/builder_client/src/lib.rs +++ b/beacon_node/builder_client/src/lib.rs @@ -1,3 +1,4 @@ +pub use eth2::Error; use eth2::types::beacon_response::EmptyMetadata; use eth2::types::builder_bid::SignedBuilderBid; use eth2::types::{ @@ -5,20 +6,19 @@ use eth2::types::{ ForkVersionedResponse, PublicKeyBytes, SignedValidatorRegistrationData, Slot, }; use eth2::types::{FullPayloadContents, SignedBlindedBeaconBlock}; -pub use eth2::Error; use eth2::{ - ok_or_error, StatusCode, CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, - JSON_CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER, + CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, JSON_CONTENT_TYPE_HEADER, + SSZ_CONTENT_TYPE_HEADER, 
StatusCode, ok_or_error, }; -use reqwest::header::{HeaderMap, HeaderValue, ACCEPT}; +use reqwest::header::{ACCEPT, HeaderMap, HeaderValue}; use reqwest::{IntoUrl, Response}; use sensitive_url::SensitiveUrl; -use serde::de::DeserializeOwned; use serde::Serialize; +use serde::de::DeserializeOwned; use ssz::Encode; use std::str::FromStr; -use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicBool, Ordering}; use std::time::Duration; pub const DEFAULT_TIMEOUT_MILLIS: u64 = 15000; diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 7308a4775db..87c287331c7 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -1,38 +1,38 @@ +use crate::Client; use crate::compute_light_client_updates::{ - compute_light_client_updates, LIGHT_CLIENT_SERVER_CHANNEL_CAPACITY, + LIGHT_CLIENT_SERVER_CHANNEL_CAPACITY, compute_light_client_updates, }; use crate::config::{ClientGenesis, Config as ClientConfig}; use crate::notifier::spawn_notifier; -use crate::Client; use beacon_chain::attestation_simulator::start_attestation_simulator_service; use beacon_chain::data_availability_checker::start_availability_cache_maintenance_service; use beacon_chain::graffiti_calculator::start_engine_version_cache_refresh_service; use beacon_chain::proposer_prep_service::start_proposer_prep_service; use beacon_chain::schema_change::migrate_schema; use beacon_chain::{ + BeaconChain, BeaconChainTypes, MigratorConfig, ServerSentEventHandler, builder::{BeaconChainBuilder, Witness}, slot_clock::{SlotClock, SystemTimeSlotClock}, state_advance_timer::spawn_state_advance_timer, store::{HotColdDB, ItemStore, StoreConfig}, - BeaconChain, BeaconChainTypes, MigratorConfig, ServerSentEventHandler, }; use beacon_chain::{Kzg, LightClientProducerEvent}; use beacon_processor::{BeaconProcessor, BeaconProcessorChannels}; use beacon_processor::{BeaconProcessorConfig, BeaconProcessorQueueLengths}; use 
environment::RuntimeContext; use eth2::{ - types::{BlockId, StateId}, BeaconNodeHttpClient, Error as ApiError, Timeouts, + types::{BlockId, StateId}, }; -use execution_layer::test_utils::generate_genesis_header; use execution_layer::ExecutionLayer; +use execution_layer::test_utils::generate_genesis_header; use futures::channel::mpsc::Receiver; -use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; -use lighthouse_network::{prometheus_client::registry::Registry, NetworkGlobals}; +use genesis::{DEFAULT_ETH1_BLOCK_HASH, interop_genesis_state}; +use lighthouse_network::{NetworkGlobals, prometheus_client::registry::Registry}; use monitoring_api::{MonitoringHttpClient, ProcessType}; use network::{NetworkConfig, NetworkSenders, NetworkService}; -use rand::rngs::{OsRng, StdRng}; use rand::SeedableRng; +use rand::rngs::{OsRng, StdRng}; use slasher::Slasher; use slasher_service::SlasherService; use std::path::{Path, PathBuf}; @@ -44,8 +44,8 @@ use timer::spawn_timer; use tracing::{debug, info, warn}; use types::data_column_custody_group::get_custody_groups_ordered; use types::{ - test_utils::generate_deterministic_keypairs, BeaconState, BlobSidecarList, ChainSpec, EthSpec, - ExecutionBlockHash, Hash256, SignedBeaconBlock, + BeaconState, BlobSidecarList, ChainSpec, EthSpec, ExecutionBlockHash, Hash256, + SignedBeaconBlock, test_utils::generate_deterministic_keypairs, }; /// Interval between polling the eth1 node for genesis information. 
@@ -208,7 +208,8 @@ where .import_all_data_columns(config.network.subscribe_all_data_column_subnets) .validator_monitor_config(config.validator_monitor.clone()) .rng(Box::new( - StdRng::from_rng(OsRng).map_err(|e| format!("Failed to create RNG: {:?}", e))?, + StdRng::try_from_rng(&mut OsRng) + .map_err(|e| format!("Failed to create RNG: {:?}", e))?, )); let builder = if let Some(slasher) = self.slasher.clone() { @@ -297,37 +298,37 @@ where // It doesn't make sense to try and sync the chain if we can't // verify blob availability by downloading blobs from the P2P // network. The user should do a checkpoint sync instead. - if !config.allow_insecure_genesis_sync { - if let Some(deneb_fork_epoch) = spec.deneb_fork_epoch { - let now = SystemTime::now() - .duration_since(UNIX_EPOCH) - .map_err(|e| format!("Unable to read system time: {e:}"))? - .as_secs(); - let genesis_time = genesis_state.genesis_time(); - let deneb_time = genesis_time - + (deneb_fork_epoch.as_u64() - * E::slots_per_epoch() - * spec.seconds_per_slot); - - // Shrink the blob availability window so users don't start - // a sync right before blobs start to disappear from the P2P - // network. - let reduced_p2p_availability_epochs = spec - .min_epochs_for_blob_sidecars_requests - .saturating_sub(BLOB_AVAILABILITY_REDUCTION_EPOCHS); - let blob_availability_window = reduced_p2p_availability_epochs + if !config.allow_insecure_genesis_sync + && let Some(deneb_fork_epoch) = spec.deneb_fork_epoch + { + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .map_err(|e| format!("Unable to read system time: {e:}"))? 
+ .as_secs(); + let genesis_time = genesis_state.genesis_time(); + let deneb_time = genesis_time + + (deneb_fork_epoch.as_u64() * E::slots_per_epoch() - * spec.seconds_per_slot; - - if now > deneb_time + blob_availability_window { - return Err( + * spec.seconds_per_slot); + + // Shrink the blob availability window so users don't start + // a sync right before blobs start to disappear from the P2P + // network. + let reduced_p2p_availability_epochs = spec + .min_epochs_for_blob_sidecars_requests + .saturating_sub(BLOB_AVAILABILITY_REDUCTION_EPOCHS); + let blob_availability_window = reduced_p2p_availability_epochs + * E::slots_per_epoch() + * spec.seconds_per_slot; + + if now > deneb_time + blob_availability_window { + return Err( "Syncing from genesis is insecure and incompatible with data availability checks. \ You should instead perform a checkpoint sync from a trusted node using the --checkpoint-sync-url option. \ For a list of public endpoints, see: https://eth-clients.github.io/checkpoint-sync-endpoints/ \ Alternatively, use --allow-insecure-genesis-sync if the risks are understood." .to_string(), ); - } } } @@ -445,7 +446,7 @@ where builder.weak_subjectivity_state(state, block, blobs, genesis_state)? 
} ClientGenesis::DepositContract => { - return Err("Loading genesis from deposit contract no longer supported".to_string()) + return Err("Loading genesis from deposit contract no longer supported".to_string()); } ClientGenesis::FromStore => builder.resume_from_db()?, }; diff --git a/beacon_node/client/src/compute_light_client_updates.rs b/beacon_node/client/src/compute_light_client_updates.rs index 75fa22e7954..44c3475bfe8 100644 --- a/beacon_node/client/src/compute_light_client_updates.rs +++ b/beacon_node/client/src/compute_light_client_updates.rs @@ -1,8 +1,8 @@ use beacon_chain::{BeaconChain, BeaconChainTypes, LightClientProducerEvent}; use beacon_processor::work_reprocessing_queue::ReprocessQueueMessage; use beacon_processor::{BeaconProcessorSend, Work, WorkEvent}; -use futures::channel::mpsc::Receiver; use futures::StreamExt; +use futures::channel::mpsc::Receiver; use tracing::error; // Each `LightClientProducerEvent` is ~200 bytes. With the light_client server producing only recent diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 495df7d5f7d..e2e3613c899 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -1,6 +1,6 @@ +use beacon_chain::TrustedSetup; use beacon_chain::graffiti_calculator::GraffitiOrigin; use beacon_chain::validator_monitor::ValidatorMonitorConfig; -use beacon_chain::TrustedSetup; use beacon_processor::BeaconProcessorConfig; use directory::DEFAULT_ROOT_DIR; use environment::LoggerConfig; diff --git a/beacon_node/client/src/metrics.rs b/beacon_node/client/src/metrics.rs index e5c07baddc2..605a7346886 100644 --- a/beacon_node/client/src/metrics.rs +++ b/beacon_node/client/src/metrics.rs @@ -11,7 +11,7 @@ pub static SYNC_SLOTS_PER_SECOND: LazyLock> = LazyLock::new(|| pub static IS_SYNCED: LazyLock> = LazyLock::new(|| { try_create_int_gauge( "sync_eth2_synced", - "Metric to check if the beacon chain is synced to head. 
0 if not synced and non-zero if synced" + "Metric to check if the beacon chain is synced to head. 0 if not synced and non-zero if synced", ) }); diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index ea9fbe2894a..e1c5bd2293c 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -1,13 +1,13 @@ use crate::metrics; use beacon_chain::{ + BeaconChain, BeaconChainTypes, ExecutionStatus, bellatrix_readiness::{BellatrixReadiness, GenesisExecutionPayloadStatus, MergeConfig}, capella_readiness::CapellaReadiness, deneb_readiness::DenebReadiness, electra_readiness::ElectraReadiness, fulu_readiness::FuluReadiness, - BeaconChain, BeaconChainTypes, ExecutionStatus, }; -use lighthouse_network::{types::SyncState, NetworkGlobals}; +use lighthouse_network::{NetworkGlobals, types::SyncState}; use logging::crit; use slot_clock::SlotClock; use std::sync::Arc; diff --git a/beacon_node/execution_layer/src/block_hash.rs b/beacon_node/execution_layer/src/block_hash.rs index d3a32c7929b..e45bf477a2c 100644 --- a/beacon_node/execution_layer/src/block_hash.rs +++ b/beacon_node/execution_layer/src/block_hash.rs @@ -1,6 +1,6 @@ use crate::{ json_structures::{EncodableJsonWithdrawal, JsonWithdrawal}, - keccak::{keccak256, KeccakHasher}, + keccak::{KeccakHasher, keccak256}, }; use alloy_rlp::Encodable; use keccak_hash::KECCAK_EMPTY_LIST_RLP; @@ -80,7 +80,7 @@ mod test { use super::*; use hex::FromHex; use std::str::FromStr; - use types::{Address, Hash256, Hash64, Uint256}; + use types::{Address, Hash64, Hash256, Uint256}; fn test_rlp_encoding( header: &ExecutionBlockHeader, diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 3c82e6251bf..783b6fda86f 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -27,7 +27,7 @@ use types::{ ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, 
ExecutionPayloadElectra, ExecutionPayloadFulu, ExecutionRequests, KzgProofs, }; -use types::{Graffiti, GRAFFITI_BYTES_LEN}; +use types::{GRAFFITI_BYTES_LEN, Graffiti}; pub mod auth; pub mod http; diff --git a/beacon_node/execution_layer/src/engine_api/auth.rs b/beacon_node/execution_layer/src/engine_api/auth.rs index 2f4c0cd1e86..af1ca195bd1 100644 --- a/beacon_node/execution_layer/src/engine_api/auth.rs +++ b/beacon_node/execution_layer/src/engine_api/auth.rs @@ -1,6 +1,6 @@ use std::path::PathBuf; -use jsonwebtoken::{encode, get_current_timestamp, Algorithm, EncodingKey, Header}; +use jsonwebtoken::{Algorithm, EncodingKey, Header, encode, get_current_timestamp}; use rand::Rng; use serde::{Deserialize, Serialize}; use zeroize::Zeroize; @@ -46,7 +46,7 @@ impl JwtKey { /// Generate a random secret. pub fn random() -> Self { - Self(rand::thread_rng().gen::<[u8; JWT_SECRET_LENGTH]>()) + Self(rand::rng().random::<[u8; JWT_SECRET_LENGTH]>()) } /// Returns a reference to the underlying byte array. 
diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 300713fdca4..06da8adb915 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -226,7 +226,7 @@ pub mod deposit_methods { use super::Log; use crate::HttpJsonRpc; use serde::{Deserialize, Serialize}; - use serde_json::{json, Value}; + use serde_json::{Value, json}; use std::fmt; use std::ops::Range; use std::str::FromStr; @@ -1392,7 +1392,7 @@ impl HttpJsonRpc { mod test { use super::auth::JwtKey; use super::*; - use crate::test_utils::{MockServer, DEFAULT_JWT_SECRET}; + use crate::test_utils::{DEFAULT_JWT_SECRET, MockServer}; use std::future::Future; use std::str::FromStr; use std::sync::Arc; diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 30d30481eaf..7823b732fbd 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -440,10 +440,10 @@ impl TryFrom for ExecutionRequests { // Elements of the list **MUST** be ordered by `request_type` in ascending order let current_prefix = RequestType::from_u8(*prefix_byte) .ok_or(RequestsError::InvalidPrefix(*prefix_byte))?; - if let Some(prev) = prev_prefix { - if prev.to_u8() >= current_prefix.to_u8() { - return Err(RequestsError::InvalidOrdering); - } + if let Some(prev) = prev_prefix + && prev.to_u8() >= current_prefix.to_u8() + { + return Err(RequestsError::InvalidOrdering); } prev_prefix = Some(current_prefix); diff --git a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs index 23610c9ae45..85bec9e9ac2 100644 --- a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs +++ b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs @@ -1,4 +1,4 @@ 
-use crate::{block_hash::calculate_execution_block_hash, metrics, Error}; +use crate::{Error, block_hash::calculate_execution_block_hash, metrics}; use crate::versioned_hashes::verify_versioned_hashes; use state_processing::per_block_processing::deneb::kzg_commitment_to_versioned_hash; diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index c46a94c5af2..cc2bfcc7b6c 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -11,11 +11,11 @@ use std::num::NonZeroUsize; use std::sync::Arc; use std::time::Duration; use task_executor::TaskExecutor; -use tokio::sync::{watch, Mutex, RwLock}; +use tokio::sync::{Mutex, RwLock, watch}; use tokio_stream::wrappers::WatchStream; use tracing::{debug, error, info, warn}; -use types::non_zero_usize::new_non_zero_usize; use types::ExecutionBlockHash; +use types::non_zero_usize::new_non_zero_usize; /// The number of payload IDs that will be stored for each `Engine`. /// diff --git a/beacon_node/execution_layer/src/keccak.rs b/beacon_node/execution_layer/src/keccak.rs index 62e354d5030..609f7668867 100644 --- a/beacon_node/execution_layer/src/keccak.rs +++ b/beacon_node/execution_layer/src/keccak.rs @@ -11,8 +11,8 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-use hash256_std_hasher::Hash256StdHasher; use hash_db::Hasher; +use hash256_std_hasher::Hash256StdHasher; use types::Hash256; pub fn keccak256(bytes: &[u8]) -> Hash256 { diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index bfde2be17b0..69fc342368a 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -7,28 +7,28 @@ use crate::json_structures::{BlobAndProofV1, BlobAndProofV2}; use crate::payload_cache::PayloadCache; use arc_swap::ArcSwapOption; -use auth::{strip_prefix, Auth, JwtKey}; +use auth::{Auth, JwtKey, strip_prefix}; pub use block_hash::calculate_execution_block_hash; use builder_client::BuilderHttpClient; pub use engine_api::EngineCapabilities; use engine_api::Error as ApiError; pub use engine_api::*; -pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; +pub use engine_api::{http, http::HttpJsonRpc, http::deposit_methods}; use engines::{Engine, EngineError}; pub use engines::{EngineState, ForkchoiceState}; -use eth2::types::{builder_bid::SignedBuilderBid, ForkVersionedResponse}; use eth2::types::{BlobsBundle, FullPayloadContents}; +use eth2::types::{ForkVersionedResponse, builder_bid::SignedBuilderBid}; use ethers_core::types::Transaction as EthersTransaction; use fixed_bytes::UintExtended; use fork_choice::ForkchoiceUpdateParameters; use logging::crit; use lru::LruCache; -use payload_status::process_payload_status; pub use payload_status::PayloadStatus; +use payload_status::process_payload_status; use sensitive_url::SensitiveUrl; use serde::{Deserialize, Serialize}; use slot_clock::SlotClock; -use std::collections::{hash_map::Entry, HashMap}; +use std::collections::{HashMap, hash_map::Entry}; use std::fmt; use std::future::Future; use std::io::Write; @@ -354,11 +354,11 @@ impl ProposerPreparationDataEntry { // Update `gas_limit` if `updated.gas_limit` is `Some` and: // - `self.gas_limit` is `None`, or // - both are `Some` but the values differ. 
- if let Some(updated_gas_limit) = updated.gas_limit { - if self.gas_limit != Some(updated_gas_limit) { - self.gas_limit = Some(updated_gas_limit); - changed = true; - } + if let Some(updated_gas_limit) = updated.gas_limit + && self.gas_limit != Some(updated_gas_limit) + { + self.gas_limit = Some(updated_gas_limit); + changed = true; } // Update `update_epoch` if it differs @@ -740,18 +740,18 @@ impl ExecutionLayer { /// Returns the `Self::is_synced` response if unable to get latest block. pub async fn is_synced_for_notifier(&self, current_slot: Slot) -> bool { let synced = self.is_synced().await; - if synced { - if let Ok(Some(block)) = self + if synced + && let Ok(Some(block)) = self .engine() .api .get_block_by_number(BlockByNumberQuery::Tag(LATEST_TAG)) .await - { - if block.block_number == 0 && current_slot > 0 { - return false; - } - } + && block.block_number == 0 + && current_slot > 0 + { + return false; } + synced } @@ -1479,17 +1479,17 @@ impl ExecutionLayer { let payload_attributes = self.payload_attributes(next_slot, head_block_root).await; // Compute the "lookahead", the time between when the payload will be produced and now. 
- if let Some(ref payload_attributes) = payload_attributes { - if let Ok(now) = SystemTime::now().duration_since(UNIX_EPOCH) { - let timestamp = Duration::from_secs(payload_attributes.timestamp()); - if let Some(lookahead) = timestamp.checked_sub(now) { - metrics::observe_duration( - &metrics::EXECUTION_LAYER_PAYLOAD_ATTRIBUTES_LOOKAHEAD, - lookahead, - ); - } else { - debug!(?timestamp, ?now, "Late payload attributes") - } + if let Some(ref payload_attributes) = payload_attributes + && let Ok(now) = SystemTime::now().duration_since(UNIX_EPOCH) + { + let timestamp = Duration::from_secs(payload_attributes.timestamp()); + if let Some(lookahead) = timestamp.checked_sub(now) { + metrics::observe_duration( + &metrics::EXECUTION_LAYER_PAYLOAD_ATTRIBUTES_LOOKAHEAD, + lookahead, + ); + } else { + debug!(?timestamp, ?now, "Late payload attributes") } } @@ -1717,14 +1717,13 @@ impl ExecutionLayer { self.engine() .request(|engine| async move { - if let Some(pow_block) = self.get_pow_block(engine, block_hash).await? { - if let Some(pow_parent) = + if let Some(pow_block) = self.get_pow_block(engine, block_hash).await? + && let Some(pow_parent) = self.get_pow_block(engine, pow_block.parent_hash).await? 
- { - return Ok(Some( - self.is_valid_terminal_pow_block(pow_block, pow_parent, spec), - )); - } + { + return Ok(Some( + self.is_valid_terminal_pow_block(pow_block, pow_parent, spec), + )); } Ok(None) }) @@ -2302,12 +2301,13 @@ mod test { let (mock, block_hash) = MockExecutionLayer::default_params(runtime.task_executor.clone()) .move_to_terminal_block() .produce_forked_pow_block(); - assert!(mock - .el - .is_valid_terminal_pow_block_hash(block_hash, &mock.spec) - .await - .unwrap() - .unwrap()); + assert!( + mock.el + .is_valid_terminal_pow_block_hash(block_hash, &mock.spec) + .await + .unwrap() + .unwrap() + ); } #[tokio::test] diff --git a/beacon_node/execution_layer/src/metrics.rs b/beacon_node/execution_layer/src/metrics.rs index aba8434c8e3..859f33bc813 100644 --- a/beacon_node/execution_layer/src/metrics.rs +++ b/beacon_node/execution_layer/src/metrics.rs @@ -41,17 +41,17 @@ pub static EXECUTION_LAYER_REQUEST_TIMES: LazyLock> = LazyL pub static EXECUTION_LAYER_PAYLOAD_ATTRIBUTES_LOOKAHEAD: LazyLock> = LazyLock::new(|| { try_create_histogram( - "execution_layer_payload_attributes_lookahead", - "Duration between an fcU call with PayloadAttributes and when the block should be produced", - ) + "execution_layer_payload_attributes_lookahead", + "Duration between an fcU call with PayloadAttributes and when the block should be produced", + ) }); pub static EXECUTION_LAYER_PRE_PREPARED_PAYLOAD_ID: LazyLock> = LazyLock::new( || { try_create_int_counter_vec( - "execution_layer_pre_prepared_payload_id", - "Indicates hits or misses for already having prepared a payload id before payload production", - &["event"] - ) + "execution_layer_pre_prepared_payload_id", + "Indicates hits or misses for already having prepared a payload id before payload production", + &["event"], + ) }, ); pub static EXECUTION_LAYER_GET_PAYLOAD_BODIES_BY_RANGE: LazyLock> = @@ -113,7 +113,7 @@ pub static EXECUTION_LAYER_PAYLOAD_BIDS: LazyLock> = LazyLoc try_create_int_gauge_vec( 
"execution_layer_payload_bids", "The gwei bid value of payloads received by local EEs or builders. Only shows values up to i64::MAX.", - &["source"] + &["source"], ) }); pub static EXECUTION_LAYER_INFO: LazyLock> = LazyLock::new(|| { diff --git a/beacon_node/execution_layer/src/payload_status.rs b/beacon_node/execution_layer/src/payload_status.rs index bbfd30239de..efe7d2cf91f 100644 --- a/beacon_node/execution_layer/src/payload_status.rs +++ b/beacon_node/execution_layer/src/payload_status.rs @@ -44,8 +44,7 @@ pub fn process_payload_status( } else { let error = format!( "new_payload: response.status = VALID but invalid latest_valid_hash. Expected({:?}) Found({:?})", - head_block_hash, - response.latest_valid_hash + head_block_hash, response.latest_valid_hash ); Err(EngineError::Api { error: ApiError::BadResponse(error), diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index aefb6d67503..e2b08063fe5 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -1,15 +1,15 @@ +use crate::EthersTransaction; use crate::engine_api::{ + ExecutionBlock, PayloadAttributes, PayloadId, PayloadStatusV1, PayloadStatusV1Status, json_structures::{ JsonForkchoiceUpdatedV1Response, JsonPayloadStatusV1, JsonPayloadStatusV1Status, }, - ExecutionBlock, PayloadAttributes, PayloadId, PayloadStatusV1, PayloadStatusV1Status, }; use crate::engines::ForkchoiceState; -use crate::EthersTransaction; use eth2::types::BlobsBundle; use kzg::{Kzg, KzgCommitment, KzgProof}; use parking_lot::Mutex; -use rand::{rngs::StdRng, Rng, SeedableRng}; +use rand::{Rng, SeedableRng, rngs::StdRng}; use serde::{Deserialize, Serialize}; use ssz::Decode; use ssz_types::VariableList; @@ -178,7 +178,7 @@ impl ExecutionBlockGenerator { spec: Arc, kzg: Option>, ) -> Self { - let mut gen = Self { + let mut 
generator = Self { head_block: <_>::default(), finalized_block_hash: <_>::default(), blocks: <_>::default(), @@ -200,9 +200,9 @@ impl ExecutionBlockGenerator { spec, }; - gen.insert_pow_block(0).unwrap(); + generator.insert_pow_block(0).unwrap(); - gen + generator } pub fn latest_block(&self) -> Option> { @@ -509,10 +509,10 @@ impl ExecutionBlockGenerator { // This is meant to cover starting post-merge transition at genesis. Useful for // testing Capella forks and later. let head_block_hash = forkchoice_state.head_block_hash; - if let Some(genesis_pow_block) = self.block_by_number(0) { - if genesis_pow_block.block_hash() == head_block_hash { - self.terminal_block_hash = head_block_hash; - } + if let Some(genesis_pow_block) = self.block_by_number(0) + && genesis_pow_block.block_hash() == head_block_hash + { + self.terminal_block_hash = head_block_hash; } if let Some(payload) = self.pending_payloads.remove(&head_block_hash) { @@ -711,7 +711,7 @@ impl ExecutionBlockGenerator { // TODO(EIP-7892): see FIXME below // FIXME: this will break with BPO forks. This function needs to calculate the epoch based on block timestamp.. 
let max_blobs = self.spec.max_blobs_per_block_within_fork(fork_name) as usize; - let num_blobs = rng.gen_range(self.min_blobs_count..=max_blobs); + let num_blobs = rng.random_range(self.min_blobs_count..=max_blobs); let (bundle, transactions) = generate_blobs(num_blobs, fork_name)?; for tx in Vec::from(transactions) { execution_payload @@ -753,8 +753,8 @@ pub fn load_test_blobs_bundle_v1() -> Result<(KzgCommitment, KzgProo )) } -pub fn load_test_blobs_bundle_v2( -) -> Result<(KzgCommitment, KzgProofs, Blob), String> { +pub fn load_test_blobs_bundle_v2() +-> Result<(KzgCommitment, KzgProofs, Blob), String> { let BlobsBundle:: { commitments, proofs, @@ -937,7 +937,7 @@ pub fn generate_pow_block( #[cfg(test)] mod test { use super::*; - use kzg::{trusted_setup::get_trusted_setup, Bytes48, CellRef, KzgBlobRef, TrustedSetup}; + use kzg::{Bytes48, CellRef, KzgBlobRef, TrustedSetup, trusted_setup::get_trusted_setup}; use types::{MainnetEthSpec, MinimalEthSpec}; #[test] diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 70c21afed45..24b30b12e03 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -2,7 +2,7 @@ use super::Context; use crate::engine_api::{http::*, *}; use crate::json_structures::*; use crate::test_utils::{DEFAULT_CLIENT_VERSION, DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI}; -use serde::{de::DeserializeOwned, Deserialize}; +use serde::{Deserialize, de::DeserializeOwned}; use serde_json::Value as JsonValue; use std::sync::Arc; diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 751e99494ce..38fbc73fb41 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -7,8 +7,8 @@ use eth2::types::{ ProposerData, StateId, ValidatorId, }; use eth2::{ - 
BeaconNodeHttpClient, Timeouts, CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, - SSZ_CONTENT_TYPE_HEADER, + BeaconNodeHttpClient, CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER, + Timeouts, }; use fork_choice::ForkchoiceUpdateParameters; use parking_lot::RwLock; diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index cbe5e3ae989..4b56c771294 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -1,6 +1,6 @@ use crate::{ test_utils::{ - MockServer, DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK, DEFAULT_TERMINAL_DIFFICULTY, + DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK, DEFAULT_TERMINAL_DIFFICULTY, MockServer, }, *, }; @@ -168,10 +168,11 @@ impl MockExecutionLayer { assert_eq!(payload.prev_randao(), prev_randao); // Ensure the payload cache is empty. - assert!(self - .el - .get_payload_by_root(&payload.tree_hash_root()) - .is_none()); + assert!( + self.el + .get_payload_by_root(&payload.tree_hash_root()) + .is_none() + ); let builder_params = BuilderParams { pubkey: PublicKeyBytes::empty(), slot, diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 245aa71a157..efc0d3f89be 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -2,7 +2,7 @@ use crate::engine_api::auth::JwtKey; use crate::engine_api::{ - auth::Auth, http::JSONRPC_VERSION, ExecutionBlock, PayloadStatusV1, PayloadStatusV1Status, + ExecutionBlock, PayloadStatusV1, PayloadStatusV1Status, auth::Auth, http::JSONRPC_VERSION, }; use crate::json_structures::JsonClientVersionV1; use bytes::Bytes; @@ -23,16 +23,16 @@ use std::sync::{Arc, LazyLock}; use tokio::{runtime, sync::oneshot}; use tracing::info; use types::{ChainSpec, EthSpec, ExecutionBlockHash, Uint256}; 
-use warp::{http::StatusCode, Filter, Rejection}; +use warp::{Filter, Rejection, http::StatusCode}; use crate::EngineCapabilities; pub use execution_block_generator::DEFAULT_GAS_LIMIT; pub use execution_block_generator::{ - generate_blobs, generate_genesis_block, generate_genesis_header, generate_pow_block, - mock_el_extra_data, static_valid_tx, Block, ExecutionBlockGenerator, + Block, ExecutionBlockGenerator, generate_blobs, generate_genesis_block, + generate_genesis_header, generate_pow_block, mock_el_extra_data, static_valid_tx, }; pub use hook::Hook; -pub use mock_builder::{mock_builder_extra_data, MockBuilder, Operation}; +pub use mock_builder::{MockBuilder, Operation, mock_builder_extra_data}; pub use mock_execution_layer::MockExecutionLayer; pub const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400; diff --git a/beacon_node/genesis/src/interop.rs b/beacon_node/genesis/src/interop.rs index 726ca0a42d0..dfa4daab9ae 100644 --- a/beacon_node/genesis/src/interop.rs +++ b/beacon_node/genesis/src/interop.rs @@ -194,7 +194,7 @@ pub fn interop_genesis_state_with_eth1( #[cfg(test)] mod test { use super::*; - use types::{test_utils::generate_deterministic_keypairs, MinimalEthSpec}; + use types::{MinimalEthSpec, test_utils::generate_deterministic_keypairs}; type TestEthSpec = MinimalEthSpec; diff --git a/beacon_node/genesis/src/lib.rs b/beacon_node/genesis/src/lib.rs index 35f0b0e3801..08af792415f 100644 --- a/beacon_node/genesis/src/lib.rs +++ b/beacon_node/genesis/src/lib.rs @@ -2,7 +2,7 @@ mod common; mod interop; pub use interop::{ - bls_withdrawal_credentials, interop_genesis_state, interop_genesis_state_with_eth1, - InteropGenesisBuilder, DEFAULT_ETH1_BLOCK_HASH, + DEFAULT_ETH1_BLOCK_HASH, InteropGenesisBuilder, bls_withdrawal_credentials, + interop_genesis_state, interop_genesis_state_with_eth1, }; pub use types::test_utils::generate_deterministic_keypairs; diff --git a/beacon_node/http_api/src/aggregate_attestation.rs b/beacon_node/http_api/src/aggregate_attestation.rs 
index d62b3a0a4ac..183d29df228 100644 --- a/beacon_node/http_api/src/aggregate_attestation.rs +++ b/beacon_node/http_api/src/aggregate_attestation.rs @@ -1,6 +1,6 @@ use crate::api_types::GenericResponse; use crate::unsupported_version_rejection; -use crate::version::{add_consensus_version_header, V1, V2}; +use crate::version::{V1, V2, add_consensus_version_header}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2::types::{self, EndpointVersion, Hash256, Slot}; use std::sync::Arc; diff --git a/beacon_node/http_api/src/attestation_performance.rs b/beacon_node/http_api/src/attestation_performance.rs index 23ab5e3752b..6e285829d22 100644 --- a/beacon_node/http_api/src/attestation_performance.rs +++ b/beacon_node/http_api/src/attestation_performance.rs @@ -3,7 +3,7 @@ use eth2::lighthouse::{ AttestationPerformance, AttestationPerformanceQuery, AttestationPerformanceStatistics, }; use state_processing::{ - per_epoch_processing::EpochProcessingSummary, BlockReplayError, BlockReplayer, + BlockReplayError, BlockReplayer, per_epoch_processing::EpochProcessingSummary, }; use std::sync::Arc; use types::{BeaconState, BeaconStateError, EthSpec, Hash256}; diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index e33de254704..2ecc1dd6155 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -1,5 +1,5 @@ use crate::version::inconsistent_fork_rejection; -use crate::{state_id::checkpoint_slot_and_execution_optimistic, ExecutionOptimistic}; +use crate::{ExecutionOptimistic, state_id::checkpoint_slot_and_execution_optimistic}; use beacon_chain::kzg_utils::reconstruct_blobs; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; use eth2::types::BlobIndicesQuery; @@ -419,9 +419,9 @@ impl BlockId { }, ) } else { - Err(warp_utils::reject::custom_server_error( - format!("Insufficient data columns to reconstruct blobs: required {num_required_columns}, but only 
{num_found_column_keys} were found.") - )) + Err(warp_utils::reject::custom_server_error(format!( + "Insufficient data columns to reconstruct blobs: required {num_required_columns}, but only {num_found_column_keys} were found." + ))) } } } diff --git a/beacon_node/http_api/src/block_packing_efficiency.rs b/beacon_node/http_api/src/block_packing_efficiency.rs index 249a6732dcb..3772470b281 100644 --- a/beacon_node/http_api/src/block_packing_efficiency.rs +++ b/beacon_node/http_api/src/block_packing_efficiency.rs @@ -4,7 +4,7 @@ use eth2::lighthouse::{ }; use parking_lot::Mutex; use state_processing::{ - per_epoch_processing::EpochProcessingSummary, BlockReplayError, BlockReplayer, + BlockReplayError, BlockReplayer, per_epoch_processing::EpochProcessingSummary, }; use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; diff --git a/beacon_node/http_api/src/builder_states.rs b/beacon_node/http_api/src/builder_states.rs index 40b38157365..7c05dd00d26 100644 --- a/beacon_node/http_api/src/builder_states.rs +++ b/beacon_node/http_api/src/builder_states.rs @@ -21,15 +21,14 @@ pub fn get_next_withdrawals( // advance the state to the epoch of the proposal slot. 
let proposal_epoch = proposal_slot.epoch(T::EthSpec::slots_per_epoch()); let (state_root, _, _) = state_id.root(chain)?; - if proposal_epoch != state.current_epoch() { - if let Err(e) = + if proposal_epoch != state.current_epoch() + && let Err(e) = partial_state_advance(&mut state, Some(state_root), proposal_slot, &chain.spec) - { - return Err(warp_utils::reject::custom_server_error(format!( - "failed to advance to the epoch of the proposal slot: {:?}", - e - ))); - } + { + return Err(warp_utils::reject::custom_server_error(format!( + "failed to advance to the epoch of the proposal slot: {:?}", + e + ))); } match get_expected_withdrawals(&state, &chain.spec) { diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 870276c0802..b0b4f9df56f 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -36,9 +36,9 @@ use crate::light_client::{get_light_client_bootstrap, get_light_client_updates}; use crate::produce_block::{produce_blinded_block_v2, produce_block_v2, produce_block_v3}; use crate::version::beacon_response; use beacon_chain::{ - attestation_verification::VerifiedAttestation, observed_operations::ObservationOutcome, - validator_monitor::timestamp_now, AttestationError as AttnError, BeaconChain, BeaconChainError, - BeaconChainTypes, WhenSlotSkipped, + AttestationError as AttnError, BeaconChain, BeaconChainError, BeaconChainTypes, + WhenSlotSkipped, attestation_verification::VerifiedAttestation, + observed_operations::ObservationOutcome, validator_monitor::timestamp_now, }; use beacon_processor::BeaconProcessorSend; pub use block_id::BlockId; @@ -54,14 +54,14 @@ use eth2::types::{ use eth2::{CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER}; use health_metrics::observe::Observe; use lighthouse_network::rpc::methods::MetaData; -use lighthouse_network::{types::SyncState, Enr, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; +use lighthouse_network::{Enr, EnrExt, NetworkGlobals, PeerId, 
PubsubMessage, types::SyncState}; use lighthouse_version::version_with_platform; -use logging::{crit, SSELoggingComponents}; +use logging::{SSELoggingComponents, crit}; use network::{NetworkMessage, NetworkSenders, ValidatorSubscriptionMessage}; use operation_pool::ReceivedPreCapella; use parking_lot::RwLock; pub use publish_blocks::{ - publish_blinded_block, publish_block, reconstruct_block, ProvenancedBlock, + ProvenancedBlock, publish_blinded_block, publish_block, reconstruct_block, }; use serde::{Deserialize, Serialize}; use slot_clock::SlotClock; @@ -82,8 +82,8 @@ use tokio::sync::{ oneshot, }; use tokio_stream::{ - wrappers::{errors::BroadcastStreamRecvError, BroadcastStream}, StreamExt, + wrappers::{BroadcastStream, errors::BroadcastStreamRecvError}, }; use tracing::{debug, error, info, warn}; use types::{ @@ -96,15 +96,15 @@ use types::{ }; use validator::pubkey_to_validator_index; use version::{ - add_consensus_version_header, add_ssz_content_type_header, + ResponseIncludesVersion, V1, V2, V3, add_consensus_version_header, add_ssz_content_type_header, execution_optimistic_finalized_beacon_response, inconsistent_fork_rejection, - unsupported_version_rejection, ResponseIncludesVersion, V1, V2, V3, + unsupported_version_rejection, }; +use warp::Reply; use warp::http::StatusCode; use warp::hyper::Body; use warp::sse::Event; -use warp::Reply; -use warp::{http::Response, Filter, Rejection}; +use warp::{Filter, Rejection, http::Response}; use warp_utils::{query::multi_key_query, reject::convert_rejection, uor::UnifyingOrFilter}; const API_PREFIX: &str = "eth"; @@ -920,79 +920,76 @@ pub fn serve( None }; - let committee_cache = if let Some(shuffling) = - maybe_cached_shuffling - { - shuffling - } else { - let possibly_built_cache = - match RelativeEpoch::from_epoch(current_epoch, epoch) { - Ok(relative_epoch) - if state.committee_cache_is_initialized( - relative_epoch, - ) => - { - state.committee_cache(relative_epoch).cloned() + let committee_cache = + if let 
Some(shuffling) = maybe_cached_shuffling { + shuffling + } else { + let possibly_built_cache = + match RelativeEpoch::from_epoch(current_epoch, epoch) { + Ok(relative_epoch) + if state.committee_cache_is_initialized( + relative_epoch, + ) => + { + state.committee_cache(relative_epoch).cloned() + } + _ => CommitteeCache::initialized( + state, + epoch, + &chain.spec, + ), } - _ => CommitteeCache::initialized( - state, - epoch, - &chain.spec, - ), - } - .map_err(|e| { - match e { - BeaconStateError::EpochOutOfBounds => { - let max_sprp = - T::EthSpec::slots_per_historical_root() - as u64; - let first_subsequent_restore_point_slot = - ((epoch.start_slot( - T::EthSpec::slots_per_epoch(), - ) / max_sprp) - + 1) - * max_sprp; - if epoch < current_epoch { - warp_utils::reject::custom_bad_request( - format!( + .map_err( + |e| match e { + BeaconStateError::EpochOutOfBounds => { + let max_sprp = + T::EthSpec::slots_per_historical_root() + as u64; + let first_subsequent_restore_point_slot = + ((epoch.start_slot( + T::EthSpec::slots_per_epoch(), + ) / max_sprp) + + 1) + * max_sprp; + if epoch < current_epoch { + warp_utils::reject::custom_bad_request( + format!( "epoch out of bounds, \ try state at slot {}", first_subsequent_restore_point_slot, ), - ) - } else { - warp_utils::reject::custom_bad_request( - "epoch out of bounds, \ + ) + } else { + warp_utils::reject::custom_bad_request( + "epoch out of bounds, \ too far in future" - .into(), - ) + .into(), + ) + } } - } - _ => warp_utils::reject::unhandled_error( - BeaconChainError::from(e), - ), - } - })?; - - // Attempt to write to the beacon cache (only if the cache - // size is not the default value). 
- if chain.config.shuffling_cache_size - != beacon_chain::shuffling_cache::DEFAULT_CACHE_SIZE - { - if let Some(shuffling_id) = shuffling_id { - if let Some(mut cache_write) = chain + _ => warp_utils::reject::unhandled_error( + BeaconChainError::from(e), + ), + }, + )?; + + // Attempt to write to the beacon cache (only if the cache + // size is not the default value). + if chain.config.shuffling_cache_size + != beacon_chain::shuffling_cache::DEFAULT_CACHE_SIZE + && let Some(shuffling_id) = shuffling_id + && let Some(mut cache_write) = chain .shuffling_cache .try_write_for(std::time::Duration::from_secs(1)) - { - cache_write.insert_committee_cache( - shuffling_id, - &possibly_built_cache, - ); - } + { + cache_write.insert_committee_cache( + shuffling_id, + &possibly_built_cache, + ); } - } - possibly_built_cache - }; + + possibly_built_cache + }; // Use either the supplied slot or all slots in the epoch. let slots = @@ -1340,13 +1337,13 @@ pub fn serve( // If the parent root was supplied, check that it matches the block // obtained via a slot lookup. 
- if let Some(parent_root) = parent_root_opt { - if block.parent_root() != parent_root { - return Err(warp_utils::reject::custom_not_found(format!( - "no canonical block at slot {} with parent root {}", - slot, parent_root - ))); - } + if let Some(parent_root) = parent_root_opt + && block.parent_root() != parent_root + { + return Err(warp_utils::reject::custom_not_found(format!( + "no canonical block at slot {} with parent root {}", + slot, parent_root + ))); } (root, block, execution_optimistic, finalized) diff --git a/beacon_node/http_api/src/light_client.rs b/beacon_node/http_api/src/light_client.rs index f9559d738ea..072dee97fba 100644 --- a/beacon_node/http_api/src/light_client.rs +++ b/beacon_node/http_api/src/light_client.rs @@ -1,6 +1,6 @@ use crate::version::{ - add_consensus_version_header, add_ssz_content_type_header, beacon_response, - ResponseIncludesVersion, + ResponseIncludesVersion, add_consensus_version_header, add_ssz_content_type_header, + beacon_response, }; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2::types::{ @@ -11,9 +11,9 @@ use ssz::Encode; use std::sync::Arc; use types::{BeaconResponse, EthSpec, ForkName, Hash256, LightClientBootstrap}; use warp::{ + Rejection, hyper::{Body, Response}, reply::Reply, - Rejection, }; const MAX_REQUEST_LIGHT_CLIENT_UPDATES: u64 = 128; diff --git a/beacon_node/http_api/src/produce_block.rs b/beacon_node/http_api/src/produce_block.rs index db82ff214c8..932fb001791 100644 --- a/beacon_node/http_api/src/produce_block.rs +++ b/beacon_node/http_api/src/produce_block.rs @@ -1,10 +1,9 @@ use crate::{ build_block_contents, version::{ - add_consensus_block_value_header, add_consensus_version_header, + ResponseIncludesVersion, add_consensus_block_value_header, add_consensus_version_header, add_execution_payload_blinded_header, add_execution_payload_value_header, add_ssz_content_type_header, beacon_response, inconsistent_fork_rejection, - ResponseIncludesVersion, }, }; use 
beacon_chain::{ @@ -15,8 +14,8 @@ use ssz::Encode; use std::sync::Arc; use types::{payload::BlockProductionVersion, *}; use warp::{ - hyper::{Body, Response}, Reply, + hyper::{Body, Response}, }; /// If default boost factor is provided in validator/blocks v3 request, we will skip the calculation diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index 44286736f31..3705c399bd7 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -2,8 +2,8 @@ use crate::state_id::StateId; use beacon_chain::{ - beacon_proposer_cache::{compute_proposer_duties_from_head, ensure_state_is_in_epoch}, BeaconChain, BeaconChainError, BeaconChainTypes, + beacon_proposer_cache::{compute_proposer_duties_from_head, ensure_state_is_in_epoch}, }; use eth2::types::{self as api_types}; use safe_arith::SafeArith; @@ -121,7 +121,7 @@ fn try_proposer_duties_from_cache( return Err(warp_utils::reject::custom_server_error(format!( "head epoch {} is later than request epoch {}", head_epoch, request_epoch - ))) + ))); } }; diff --git a/beacon_node/http_api/src/publish_attestations.rs b/beacon_node/http_api/src/publish_attestations.rs index a4fcb27b1db..947edf56d95 100644 --- a/beacon_node/http_api/src/publish_attestations.rs +++ b/beacon_node/http_api/src/publish_attestations.rs @@ -36,8 +36,8 @@ //! attestations and there's no immediate cause for concern. 
use crate::task_spawner::{Priority, TaskSpawner}; use beacon_chain::{ - validator_monitor::timestamp_now, AttestationError, BeaconChain, BeaconChainError, - BeaconChainTypes, + AttestationError, BeaconChain, BeaconChainError, BeaconChainTypes, + validator_monitor::timestamp_now, }; use beacon_processor::work_reprocessing_queue::{QueuedUnaggregate, ReprocessQueueMessage}; use beacon_processor::{Work, WorkEvent}; diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 352c06e1740..6377639ccda 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -6,8 +6,8 @@ use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; use beacon_chain::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; use beacon_chain::{ - build_blob_data_column_sidecars, AvailabilityProcessingStatus, BeaconChain, BeaconChainError, - BeaconChainTypes, BlockError, IntoGossipVerifiedBlock, NotifyExecutionLayer, + AvailabilityProcessingStatus, BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, + IntoGossipVerifiedBlock, NotifyExecutionLayer, build_blob_data_column_sidecars, }; use eth2::types::{ BlobsBundle, BroadcastValidation, ErrorMessage, ExecutionPayloadAndBlobs, FullPayloadContents, @@ -20,8 +20,8 @@ use network::NetworkMessage; use rand::prelude::SliceRandom; use slot_clock::SlotClock; use std::marker::PhantomData; -use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicBool, Ordering}; use std::time::Duration; use tokio::sync::mpsc::UnboundedSender; use tracing::{debug, error, info, warn}; @@ -32,7 +32,7 @@ use types::{ FullPayloadBellatrix, Hash256, KzgProofs, SignedBeaconBlock, SignedBlindedBeaconBlock, }; use warp::http::StatusCode; -use warp::{reply::Response, Rejection, Reply}; +use warp::{Rejection, Reply, reply::Response}; pub 
type UnverifiedBlobs = Option<( KzgProofs<::EthSpec>, diff --git a/beacon_node/http_api/src/standard_block_rewards.rs b/beacon_node/http_api/src/standard_block_rewards.rs index 2f78649d78f..fda8f0ad1d7 100644 --- a/beacon_node/http_api/src/standard_block_rewards.rs +++ b/beacon_node/http_api/src/standard_block_rewards.rs @@ -1,6 +1,6 @@ -use crate::sync_committee_rewards::get_state_before_applying_block; use crate::BlockId; use crate::ExecutionOptimistic; +use crate::sync_committee_rewards::get_state_before_applying_block; use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2::types::StandardBlockReward; use std::sync::Arc; diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index a9f66de4678..13fb9b2c585 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -1,5 +1,5 @@ -use crate::metrics; use crate::ExecutionOptimistic; +use crate::metrics; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2::types::StateId as CoreStateId; use std::fmt; diff --git a/beacon_node/http_api/src/sync_committees.rs b/beacon_node/http_api/src/sync_committees.rs index 57c74f8d019..edda0e60a61 100644 --- a/beacon_node/http_api/src/sync_committees.rs +++ b/beacon_node/http_api/src/sync_committees.rs @@ -5,8 +5,8 @@ use beacon_chain::sync_committee_verification::{ Error as SyncVerificationError, VerifiedSyncCommitteeMessage, }; use beacon_chain::{ - validator_monitor::timestamp_now, BeaconChain, BeaconChainError, BeaconChainTypes, - StateSkipConfig, + BeaconChain, BeaconChainError, BeaconChainTypes, StateSkipConfig, + validator_monitor::timestamp_now, }; use eth2::types::{self as api_types}; use lighthouse_network::PubsubMessage; @@ -17,8 +17,8 @@ use std::collections::HashMap; use tokio::sync::mpsc::UnboundedSender; use tracing::{debug, error, warn}; use types::{ - slot_data::SlotData, BeaconStateError, Epoch, EthSpec, SignedContributionAndProof, - SyncCommitteeMessage, SyncDuty, 
SyncSubnetId, + BeaconStateError, Epoch, EthSpec, SignedContributionAndProof, SyncCommitteeMessage, SyncDuty, + SyncSubnetId, slot_data::SlotData, }; /// The struct that is returned to the requesting HTTP client. @@ -49,7 +49,7 @@ pub fn sync_committee_duties( return Ok(convert_to_response( verify_unknown_validators(duties, request_epoch, chain)?, execution_optimistic, - )) + )); } Err(BeaconChainError::SyncDutiesError(BeaconStateError::SyncCommitteeNotKnown { .. @@ -273,15 +273,15 @@ pub fn process_sync_committee_signatures( } } - if let Some(verified) = verified_for_pool { - if let Err(e) = chain.add_to_naive_sync_aggregation_pool(verified) { - error!( - error = ?e, - slot = %sync_committee_signature.slot, - validator_index = sync_committee_signature.validator_index, - "Unable to add sync committee signature to pool" - ); - } + if let Some(verified) = verified_for_pool + && let Err(e) = chain.add_to_naive_sync_aggregation_pool(verified) + { + error!( + error = ?e, + slot = %sync_committee_signature.slot, + validator_index = sync_committee_signature.validator_index, + "Unable to add sync committee signature to pool" + ); } } diff --git a/beacon_node/http_api/src/test_utils.rs b/beacon_node/http_api/src/test_utils.rs index a52df6c863f..90f2fd2d955 100644 --- a/beacon_node/http_api/src/test_utils.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -1,7 +1,7 @@ use crate::{Config, Context}; use beacon_chain::{ - test_utils::{BeaconChainHarness, BoxedMutator, Builder, EphemeralHarnessType}, BeaconChain, BeaconChainTypes, + test_utils::{BeaconChainHarness, BoxedMutator, Builder, EphemeralHarnessType}, }; use beacon_processor::{ BeaconProcessor, BeaconProcessorChannels, BeaconProcessorConfig, BeaconProcessorQueueLengths, @@ -10,14 +10,14 @@ use directory::DEFAULT_ROOT_DIR; use eth2::{BeaconNodeHttpClient, Timeouts}; use lighthouse_network::rpc::methods::MetaDataV3; use lighthouse_network::{ + ConnectedPoint, Enr, NetworkConfig, NetworkGlobals, PeerId, PeerManager, 
discv5::enr::CombinedKey, libp2p::swarm::{ - behaviour::{ConnectionEstablished, FromSwarm}, ConnectionId, NetworkBehaviour, + behaviour::{ConnectionEstablished, FromSwarm}, }, rpc::methods::{MetaData, MetaDataV2}, types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield, SyncState}, - ConnectedPoint, Enr, NetworkConfig, NetworkGlobals, PeerId, PeerManager, }; use network::{NetworkReceivers, NetworkSenders}; use sensitive_url::SensitiveUrl; @@ -132,7 +132,7 @@ impl InteractiveTester { pub async fn create_api_server( chain: Arc>, test_runtime: &TestRuntime, -) -> ApiServer> { +) -> ApiServer + use> { create_api_server_with_config(chain, Config::default(), test_runtime).await } @@ -140,7 +140,7 @@ pub async fn create_api_server_with_config( chain: Arc>, http_config: Config, test_runtime: &TestRuntime, -) -> ApiServer> { +) -> ApiServer + use> { // Use port 0 to allocate a new unused port. let port = 0; diff --git a/beacon_node/http_api/src/ui.rs b/beacon_node/http_api/src/ui.rs index 80a9ed896db..1538215a0b5 100644 --- a/beacon_node/http_api/src/ui.rs +++ b/beacon_node/http_api/src/ui.rs @@ -1,5 +1,5 @@ use beacon_chain::{ - validator_monitor::HISTORIC_EPOCHS, BeaconChain, BeaconChainError, BeaconChainTypes, + BeaconChain, BeaconChainError, BeaconChainTypes, validator_monitor::HISTORIC_EPOCHS, }; use eth2::types::{Epoch, ValidatorStatus}; use serde::{Deserialize, Serialize}; @@ -126,23 +126,22 @@ pub fn get_validator_info( let mut validators = HashMap::new(); for id in ids { - if let Ok(index) = id.parse::() { - if let Some(validator) = chain + if let Ok(index) = id.parse::() + && let Some(validator) = chain .validator_monitor .read() .get_monitored_validator(index) - { - let mut info = vec![]; - for epoch in epochs.clone() { - if let Some(total_balance) = validator.get_total_balance(Epoch::new(epoch)) { - info.push(ValidatorInfoValues { - epoch, - total_balance, - }); - } + { + let mut info = vec![]; + for epoch in epochs.clone() { + if let Some(total_balance) = 
validator.get_total_balance(Epoch::new(epoch)) { + info.push(ValidatorInfoValues { + epoch, + total_balance, + }); } - validators.insert(id.clone(), ValidatorInfo { info }); } + validators.insert(id.clone(), ValidatorInfo { info }); } } @@ -198,58 +197,57 @@ pub fn post_validator_monitor_metrics( let mut validators = HashMap::new(); for id in ids { - if let Ok(index) = id.parse::() { - if let Some(validator) = chain + if let Ok(index) = id.parse::() + && let Some(validator) = chain .validator_monitor .read() .get_monitored_validator(index) - { - let val_metrics = validator.metrics.read(); - let attestation_hits = val_metrics.attestation_hits; - let attestation_misses = val_metrics.attestation_misses; - let attestation_head_hits = val_metrics.attestation_head_hits; - let attestation_head_misses = val_metrics.attestation_head_misses; - let attestation_target_hits = val_metrics.attestation_target_hits; - let attestation_target_misses = val_metrics.attestation_target_misses; - let latest_attestation_inclusion_distance = - val_metrics.latest_attestation_inclusion_distance; - drop(val_metrics); - - let attestations = attestation_hits + attestation_misses; - let attestation_hit_percentage: f64 = if attestations == 0 { - 0.0 - } else { - (100 * attestation_hits / attestations) as f64 - }; - let head_attestations = attestation_head_hits + attestation_head_misses; - let attestation_head_hit_percentage: f64 = if head_attestations == 0 { - 0.0 - } else { - (100 * attestation_head_hits / head_attestations) as f64 - }; - - let target_attestations = attestation_target_hits + attestation_target_misses; - let attestation_target_hit_percentage: f64 = if target_attestations == 0 { - 0.0 - } else { - (100 * attestation_target_hits / target_attestations) as f64 - }; - - let metrics = ValidatorMetrics { - attestation_hits, - attestation_misses, - attestation_hit_percentage, - attestation_head_hits, - attestation_head_misses, - attestation_head_hit_percentage, - attestation_target_hits, 
- attestation_target_misses, - attestation_target_hit_percentage, - latest_attestation_inclusion_distance, - }; - - validators.insert(id.clone(), metrics); - } + { + let val_metrics = validator.metrics.read(); + let attestation_hits = val_metrics.attestation_hits; + let attestation_misses = val_metrics.attestation_misses; + let attestation_head_hits = val_metrics.attestation_head_hits; + let attestation_head_misses = val_metrics.attestation_head_misses; + let attestation_target_hits = val_metrics.attestation_target_hits; + let attestation_target_misses = val_metrics.attestation_target_misses; + let latest_attestation_inclusion_distance = + val_metrics.latest_attestation_inclusion_distance; + drop(val_metrics); + + let attestations = attestation_hits + attestation_misses; + let attestation_hit_percentage: f64 = if attestations == 0 { + 0.0 + } else { + (100 * attestation_hits / attestations) as f64 + }; + let head_attestations = attestation_head_hits + attestation_head_misses; + let attestation_head_hit_percentage: f64 = if head_attestations == 0 { + 0.0 + } else { + (100 * attestation_head_hits / head_attestations) as f64 + }; + + let target_attestations = attestation_target_hits + attestation_target_misses; + let attestation_target_hit_percentage: f64 = if target_attestations == 0 { + 0.0 + } else { + (100 * attestation_target_hits / target_attestations) as f64 + }; + + let metrics = ValidatorMetrics { + attestation_hits, + attestation_misses, + attestation_hit_percentage, + attestation_head_hits, + attestation_head_misses, + attestation_head_hit_percentage, + attestation_target_hits, + attestation_target_misses, + attestation_target_hit_percentage, + latest_attestation_inclusion_distance, + }; + + validators.insert(id.clone(), metrics); } } diff --git a/beacon_node/http_api/src/validator_inclusion.rs b/beacon_node/http_api/src/validator_inclusion.rs index dd4e137ce66..16010b63f38 100644 --- a/beacon_node/http_api/src/validator_inclusion.rs +++ 
b/beacon_node/http_api/src/validator_inclusion.rs @@ -4,7 +4,7 @@ use eth2::{ lighthouse::{GlobalValidatorInclusionData, ValidatorInclusionData}, types::ValidatorId, }; -use state_processing::per_epoch_processing::{process_epoch, EpochProcessingSummary}; +use state_processing::per_epoch_processing::{EpochProcessingSummary, process_epoch}; use types::{BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec}; /// Returns the state in the last slot of `epoch`. diff --git a/beacon_node/http_api/src/version.rs b/beacon_node/http_api/src/version.rs index 361e8e78eae..871a10e7d4a 100644 --- a/beacon_node/http_api/src/version.rs +++ b/beacon_node/http_api/src/version.rs @@ -5,11 +5,11 @@ use eth2::{ }; use serde::Serialize; use types::{ + BeaconResponse, ForkName, ForkVersionedResponse, InconsistentFork, Uint256, + UnversionedResponse, beacon_response::{ ExecutionOptimisticFinalizedBeaconResponse, ExecutionOptimisticFinalizedMetadata, }, - BeaconResponse, ForkName, ForkVersionedResponse, InconsistentFork, Uint256, - UnversionedResponse, }; use warp::reply::{self, Reply, Response}; diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index 78b52f86e3d..ff90b18e67e 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -1,12 +1,12 @@ use beacon_chain::test_utils::test_spec; use beacon_chain::{ - test_utils::{AttestationStrategy, BlockStrategy}, GossipVerifiedBlock, IntoGossipVerifiedBlock, + test_utils::{AttestationStrategy, BlockStrategy}, }; use eth2::reqwest::StatusCode; use eth2::types::{BroadcastValidation, PublishBlockRequest}; use http_api::test_utils::InteractiveTester; -use http_api::{publish_blinded_block, publish_block, reconstruct_block, Config, ProvenancedBlock}; +use http_api::{Config, ProvenancedBlock, publish_blinded_block, publish_block, reconstruct_block}; use std::collections::HashSet; use 
std::sync::Arc; use types::{ @@ -175,10 +175,12 @@ pub async fn gossip_full_pass() { .await; assert!(response.is_ok()); - assert!(tester - .harness - .chain - .block_is_known_to_fork_choice(&block.canonical_root())); + assert!( + tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root()) + ); } // This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=gossip`. @@ -221,10 +223,12 @@ pub async fn gossip_full_pass_ssz() { .await; assert!(response.is_ok()); - assert!(tester - .harness - .chain - .block_is_known_to_fork_choice(&block_contents.signed_block().canonical_root())); + assert!( + tester + .harness + .chain + .block_is_known_to_fork_choice(&block_contents.signed_block().canonical_root()) + ); } /// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus`. @@ -384,10 +388,12 @@ pub async fn consensus_partial_pass_only_consensus() { .await; assert!(publication_result.is_ok(), "{publication_result:?}"); - assert!(tester - .harness - .chain - .block_is_known_to_fork_choice(&block_b_root)); + assert!( + tester + .harness + .chain + .block_is_known_to_fork_choice(&block_b_root) + ); } /// This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=consensus`. @@ -429,10 +435,12 @@ pub async fn consensus_full_pass() { .await; assert!(response.is_ok()); - assert!(tester - .harness - .chain - .block_is_known_to_fork_choice(&block.canonical_root())); + assert!( + tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root()) + ); } /// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus_and_equivocation`. 
@@ -530,18 +538,22 @@ pub async fn equivocation_consensus_early_equivocation() { assert_ne!(block_a.state_root(), block_b.state_root()); /* submit `block_a` as valid */ - assert!(tester - .client - .post_beacon_blocks_v2_ssz( - &PublishBlockRequest::new(block_a.clone(), blobs_a), - validation_level - ) - .await - .is_ok()); - assert!(tester - .harness - .chain - .block_is_known_to_fork_choice(&block_a.canonical_root())); + assert!( + tester + .client + .post_beacon_blocks_v2_ssz( + &PublishBlockRequest::new(block_a.clone(), blobs_a), + validation_level + ) + .await + .is_ok() + ); + assert!( + tester + .harness + .chain + .block_is_known_to_fork_choice(&block_a.canonical_root()) + ); /* submit `block_b` which should induce equivocation */ let response: Result<(), eth2::Error> = tester @@ -724,10 +736,12 @@ pub async fn equivocation_full_pass() { .await; assert!(response.is_ok()); - assert!(tester - .harness - .chain - .block_is_known_to_fork_choice(&block.canonical_root())); + assert!( + tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root()) + ); } /// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=gossip`. @@ -860,10 +874,12 @@ pub async fn blinded_gossip_full_pass() { .await; assert!(response.is_ok()); - assert!(tester - .harness - .chain - .block_is_known_to_fork_choice(&blinded_block.canonical_root())); + assert!( + tester + .harness + .chain + .block_is_known_to_fork_choice(&blinded_block.canonical_root()) + ); } // This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=gossip`. 
@@ -902,10 +918,12 @@ pub async fn blinded_gossip_full_pass_ssz() { .await; assert!(response.is_ok()); - assert!(tester - .harness - .chain - .block_is_known_to_fork_choice(&blinded_block.canonical_root())); + assert!( + tester + .harness + .chain + .block_is_known_to_fork_choice(&blinded_block.canonical_root()) + ); } /// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus`. @@ -1039,10 +1057,12 @@ pub async fn blinded_consensus_full_pass() { .await; assert!(response.is_ok()); - assert!(tester - .harness - .chain - .block_is_known_to_fork_choice(&blinded_block.canonical_root())); + assert!( + tester + .harness + .chain + .block_is_known_to_fork_choice(&blinded_block.canonical_root()) + ); } /// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus_and_equivocation`. @@ -1142,15 +1162,19 @@ pub async fn blinded_equivocation_consensus_early_equivocation() { assert_ne!(block_a.state_root(), block_b.state_root()); /* submit `block_a` as valid */ - assert!(tester - .client - .post_beacon_blinded_blocks_v2(&block_a, validation_level) - .await - .is_ok()); - assert!(tester - .harness - .chain - .block_is_known_to_fork_choice(&block_a.canonical_root())); + assert!( + tester + .client + .post_beacon_blinded_blocks_v2(&block_a, validation_level) + .await + .is_ok() + ); + assert!( + tester + .harness + .chain + .block_is_known_to_fork_choice(&block_a.canonical_root()) + ); /* submit `block_b` which should induce equivocation */ let response: Result<(), eth2::Error> = tester @@ -1352,10 +1376,12 @@ pub async fn blinded_equivocation_full_pass() { .await; assert!(response.is_ok()); - assert!(tester - .harness - .chain - .block_is_known_to_fork_choice(&block.canonical_root())); + assert!( + tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root()) + ); } /// This test checks that an HTTP POST 
request with the block & blobs/columns succeeds with a 200 response @@ -1402,10 +1428,12 @@ pub async fn block_seen_on_gossip_without_blobs_or_columns() { .unwrap(); // It should not yet be added to fork choice because blobs have not been seen. - assert!(!tester - .harness - .chain - .block_is_known_to_fork_choice(&block.canonical_root())); + assert!( + !tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root()) + ); // Post the block *and* blobs to the HTTP API. let response: Result<(), eth2::Error> = tester @@ -1418,10 +1446,12 @@ pub async fn block_seen_on_gossip_without_blobs_or_columns() { // This should result in the block being fully imported. response.unwrap(); - assert!(tester - .harness - .chain - .block_is_known_to_fork_choice(&block.canonical_root())); + assert!( + tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root()) + ); } /// This test checks that an HTTP POST request with the block & blobs/columns succeeds with a 200 response @@ -1486,10 +1516,12 @@ pub async fn block_seen_on_gossip_with_some_blobs_or_columns() { .await; // It should not yet be added to fork choice because all blobs have not been seen. - assert!(!tester - .harness - .chain - .block_is_known_to_fork_choice(&block.canonical_root())); + assert!( + !tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root()) + ); // Post the block *and* all blobs to the HTTP API. let response: Result<(), eth2::Error> = tester @@ -1502,10 +1534,12 @@ pub async fn block_seen_on_gossip_with_some_blobs_or_columns() { // This should result in the block being fully imported. 
response.unwrap(); - assert!(tester - .harness - .chain - .block_is_known_to_fork_choice(&block.canonical_root())); + assert!( + tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root()) + ); } /// This test checks that an HTTP POST request with the block & blobs/columns succeeds with a 200 response @@ -1557,10 +1591,12 @@ pub async fn blobs_or_columns_seen_on_gossip_without_block() { .await; // It should not yet be added to fork choice because the block has not been seen. - assert!(!tester - .harness - .chain - .block_is_known_to_fork_choice(&block.canonical_root())); + assert!( + !tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root()) + ); // Post the block *and* all blobs to the HTTP API. let response: Result<(), eth2::Error> = tester @@ -1573,10 +1609,12 @@ pub async fn blobs_or_columns_seen_on_gossip_without_block() { // This should result in the block being fully imported. response.unwrap(); - assert!(tester - .harness - .chain - .block_is_known_to_fork_choice(&block.canonical_root())); + assert!( + tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root()) + ); } /// This test checks that an HTTP POST request with the block succeeds with a 200 response @@ -1628,10 +1666,12 @@ async fn blobs_or_columns_seen_on_gossip_without_block_and_no_http_blobs_or_colu .await; // It should not yet be added to fork choice because the block has not been seen. - assert!(!tester - .harness - .chain - .block_is_known_to_fork_choice(&block.canonical_root())); + assert!( + !tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root()) + ); // Post just the block to the HTTP API (blob lists are empty). let response: Result<(), eth2::Error> = tester @@ -1647,10 +1687,12 @@ async fn blobs_or_columns_seen_on_gossip_without_block_and_no_http_blobs_or_colu // This should result in the block being fully imported. 
response.unwrap(); - assert!(tester - .harness - .chain - .block_is_known_to_fork_choice(&block.canonical_root())); + assert!( + tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root()) + ); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] @@ -1702,10 +1744,12 @@ async fn slashable_blobs_or_columns_seen_on_gossip_cause_failure() { .await; // It should not yet be added to fork choice because block B has not been seen. - assert!(!tester - .harness - .chain - .block_is_known_to_fork_choice(&block_b.canonical_root())); + assert!( + !tester + .harness + .chain + .block_is_known_to_fork_choice(&block_b.canonical_root()) + ); // Post block A *and* all its blobs to the HTTP API. let response: Result<(), eth2::Error> = tester @@ -1718,10 +1762,12 @@ async fn slashable_blobs_or_columns_seen_on_gossip_cause_failure() { // This should not result in block A being fully imported. response.unwrap_err(); - assert!(!tester - .harness - .chain - .block_is_known_to_fork_choice(&block_a.canonical_root())); + assert!( + !tester + .harness + .chain + .block_is_known_to_fork_choice(&block_a.canonical_root()) + ); } /// This test checks that an HTTP POST request with a duplicate block & blobs results in the @@ -1775,10 +1821,12 @@ pub async fn duplicate_block_status_code() { // This should result in the block being fully imported. response.unwrap(); - assert!(tester - .harness - .chain - .block_is_known_to_fork_choice(&block.canonical_root())); + assert!( + tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root()) + ); // Post again. let duplicate_response: Result<(), eth2::Error> = tester diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index dcc6d13ec44..880e2067775 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -1,16 +1,16 @@ //! Tests for API behaviour across fork boundaries. 
use beacon_chain::{ - test_utils::{RelativeSyncCommittee, DEFAULT_ETH1_BLOCK_HASH, HARNESS_GENESIS_TIME}, StateSkipConfig, + test_utils::{DEFAULT_ETH1_BLOCK_HASH, HARNESS_GENESIS_TIME, RelativeSyncCommittee}, }; use eth2::types::{IndexedErrorMessage, StateId, SyncSubcommittee}; use execution_layer::test_utils::generate_genesis_header; -use genesis::{bls_withdrawal_credentials, InteropGenesisBuilder}; +use genesis::{InteropGenesisBuilder, bls_withdrawal_credentials}; use http_api::test_utils::*; use std::collections::HashSet; use types::{ - test_utils::{generate_deterministic_keypair, generate_deterministic_keypairs}, Address, ChainSpec, Epoch, EthSpec, FixedBytesExtended, Hash256, MinimalEthSpec, Slot, + test_utils::{generate_deterministic_keypair, generate_deterministic_keypairs}, }; type E = MinimalEthSpec; diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 1a31f1398ad..1e55bfb7b3a 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -1,10 +1,10 @@ //! Generic tests that make use of the (newer) `InteractiveApiTester` use beacon_chain::{ + ChainConfig, chain_config::{DisallowedReOrgOffsets, ReOrgThreshold}, test_utils::{AttestationStrategy, BlockStrategy, LightClientStrategy, SyncCommitteeStrategy}, - ChainConfig, }; -use beacon_processor::{work_reprocessing_queue::ReprocessQueueMessage, Work, WorkEvent}; +use beacon_processor::{Work, WorkEvent, work_reprocessing_queue::ReprocessQueueMessage}; use eth2::types::ProduceBlockV3Response; use eth2::types::{DepositContractData, StateId}; use execution_layer::{ForkchoiceState, PayloadAttributes}; diff --git a/beacon_node/http_api/tests/status_tests.rs b/beacon_node/http_api/tests/status_tests.rs index dd481f23bae..fd5e282c5bd 100644 --- a/beacon_node/http_api/tests/status_tests.rs +++ b/beacon_node/http_api/tests/status_tests.rs @@ -1,7 +1,7 @@ //! 
Tests related to the beacon node's sync status use beacon_chain::{ - test_utils::{AttestationStrategy, BlockStrategy, LightClientStrategy, SyncCommitteeStrategy}, BlockError, + test_utils::{AttestationStrategy, BlockStrategy, LightClientStrategy, SyncCommitteeStrategy}, }; use eth2::StatusCode; use execution_layer::{PayloadStatusV1, PayloadStatusV1Status}; diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 977b76e20ec..7231574b1d4 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1,30 +1,30 @@ use beacon_chain::test_utils::RelativeSyncCommittee; use beacon_chain::{ - test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, BeaconChain, ChainConfig, StateSkipConfig, WhenSlotSkipped, + test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, }; use eth2::{ + BeaconNodeHttpClient, Error, + Error::ServerMessage, + StatusCode, Timeouts, mixin::{RequestAccept, ResponseForkName, ResponseOptional}, reqwest::RequestBuilder, types::{ BlockId as CoreBlockId, ForkChoiceNode, ProduceBlockV3Response, StateId as CoreStateId, *, }, - BeaconNodeHttpClient, Error, - Error::ServerMessage, - StatusCode, Timeouts, }; use execution_layer::expected_gas_limit; use execution_layer::test_utils::{ - mock_builder_extra_data, mock_el_extra_data, MockBuilder, Operation, DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_GAS_LIMIT, DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI, + MockBuilder, Operation, mock_builder_extra_data, mock_el_extra_data, }; -use futures::stream::{Stream, StreamExt}; use futures::FutureExt; +use futures::stream::{Stream, StreamExt}; use http_api::{ - test_utils::{create_api_server, ApiServer}, BlockId, StateId, + test_utils::{ApiServer, create_api_server}, }; -use lighthouse_network::{types::SyncState, Enr, EnrExt, PeerId}; +use lighthouse_network::{Enr, EnrExt, PeerId, types::SyncState}; use network::NetworkReceivers; use 
operation_pool::attestation_storage::CheckpointKey; use proto_array::ExecutionStatus; @@ -39,9 +39,9 @@ use tokio::time::Duration; use tree_hash::TreeHash; use types::application_domain::ApplicationDomain; use types::{ - attestation::AttestationBase, AggregateSignature, BitList, Domain, EthSpec, ExecutionBlockHash, - Hash256, Keypair, MainnetEthSpec, RelativeEpoch, SelectionProof, SignedRoot, SingleAttestation, - Slot, + AggregateSignature, BitList, Domain, EthSpec, ExecutionBlockHash, Hash256, Keypair, + MainnetEthSpec, RelativeEpoch, SelectionProof, SignedRoot, SingleAttestation, Slot, + attestation::AttestationBase, }; type E = MainnetEthSpec; @@ -1564,11 +1564,12 @@ impl ApiTester { .await .0; - assert!(self - .client - .post_beacon_blocks(&PublishBlockRequest::from(block)) - .await - .is_err()); + assert!( + self.client + .post_beacon_blocks(&PublishBlockRequest::from(block)) + .await + .is_err() + ); assert!( self.network_rx.network_recv.recv().await.is_some(), @@ -1591,11 +1592,12 @@ impl ApiTester { .await .0; - assert!(self - .client - .post_beacon_blocks_ssz(&PublishBlockRequest::from(block)) - .await - .is_err()); + assert!( + self.client + .post_beacon_blocks_ssz(&PublishBlockRequest::from(block)) + .await + .is_err() + ); assert!( self.network_rx.network_recv.recv().await.is_some(), @@ -1616,11 +1618,12 @@ impl ApiTester { .0 .into(); - assert!(self - .client - .post_beacon_blocks(&block_contents) - .await - .is_ok()); + assert!( + self.client + .post_beacon_blocks(&block_contents) + .await + .is_ok() + ); // Blinded deneb block contents is just the blinded block let blinded_block_contents = block_contents.signed_block().clone_as_blinded(); @@ -2438,10 +2441,10 @@ impl ApiTester { pub async fn test_post_beacon_pool_attester_slashings_invalid_v1(mut self) -> Self { let mut slashing = self.attester_slashing.clone(); match &mut slashing { - AttesterSlashing::Base(ref mut slashing) => { + AttesterSlashing::Base(slashing) => { 
slashing.attestation_1.data.slot += 1; } - AttesterSlashing::Electra(ref mut slashing) => { + AttesterSlashing::Electra(slashing) => { slashing.attestation_1.data.slot += 1; } } @@ -2462,10 +2465,10 @@ impl ApiTester { pub async fn test_post_beacon_pool_attester_slashings_invalid_v2(mut self) -> Self { let mut slashing = self.attester_slashing.clone(); match &mut slashing { - AttesterSlashing::Base(ref mut slashing) => { + AttesterSlashing::Base(slashing) => { slashing.attestation_1.data.slot += 1; } - AttesterSlashing::Electra(ref mut slashing) => { + AttesterSlashing::Electra(slashing) => { slashing.attestation_1.data.slot += 1; } } @@ -4066,10 +4069,10 @@ impl ApiTester { pub async fn test_get_validator_aggregate_and_proofs_invalid_v1(mut self) -> Self { let mut aggregate = self.get_aggregate().await; match &mut aggregate { - SignedAggregateAndProof::Base(ref mut aggregate) => { + SignedAggregateAndProof::Base(aggregate) => { aggregate.message.aggregate.data.slot += 1; } - SignedAggregateAndProof::Electra(ref mut aggregate) => { + SignedAggregateAndProof::Electra(aggregate) => { aggregate.message.aggregate.data.slot += 1; } } @@ -4103,10 +4106,10 @@ impl ApiTester { pub async fn test_get_validator_aggregate_and_proofs_invalid_v2(mut self) -> Self { let mut aggregate = self.get_aggregate().await; match &mut aggregate { - SignedAggregateAndProof::Base(ref mut aggregate) => { + SignedAggregateAndProof::Base(aggregate) => { aggregate.message.aggregate.data.slot += 1; } - SignedAggregateAndProof::Electra(ref mut aggregate) => { + SignedAggregateAndProof::Electra(aggregate) => { aggregate.message.aggregate.data.slot += 1; } } @@ -4572,13 +4575,14 @@ impl ApiTester { // If this cache is empty, it indicates fallback was not used, so the payload came from the // mock builder. 
- assert!(self - .chain - .execution_layer - .as_ref() - .unwrap() - .get_payload_by_root(&payload.tree_hash_root()) - .is_none()); + assert!( + self.chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none() + ); self } @@ -4617,13 +4621,14 @@ impl ApiTester { assert_eq!(payload.gas_limit(), builder_limit); // This cache should not be populated because fallback should not have been used. - assert!(self - .chain - .execution_layer - .as_ref() - .unwrap() - .get_payload_by_root(&payload.tree_hash_root()) - .is_none()); + assert!( + self.chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none() + ); // Another way is to check for the extra data of the mock builder assert_eq!(payload.extra_data(), mock_builder_extra_data::()); @@ -4657,13 +4662,14 @@ impl ApiTester { .into(); // If this cache is populated, it indicates fallback to the local EE was correctly used. - assert!(self - .chain - .execution_layer - .as_ref() - .unwrap() - .get_payload_by_root(&payload.tree_hash_root()) - .is_some()); + assert!( + self.chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some() + ); // another way is to check for the extra data of the local EE assert_eq!(payload.extra_data(), mock_el_extra_data::()); @@ -4733,13 +4739,14 @@ impl ApiTester { assert_eq!(payload.fee_recipient(), test_fee_recipient); // This cache should not be populated because fallback should not have been used. 
- assert!(self - .chain - .execution_layer - .as_ref() - .unwrap() - .get_payload_by_root(&payload.tree_hash_root()) - .is_none()); + assert!( + self.chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none() + ); // Another way is to check for the extra data of the mock builder assert_eq!(payload.extra_data(), mock_builder_extra_data::()); @@ -4819,13 +4826,14 @@ impl ApiTester { assert_eq!(payload.parent_hash(), expected_parent_hash); // If this cache is populated, it indicates fallback to the local EE was correctly used. - assert!(self - .chain - .execution_layer - .as_ref() - .unwrap() - .get_payload_by_root(&payload.tree_hash_root()) - .is_some()); + assert!( + self.chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some() + ); // another way is to check for the extra data of the local EE assert_eq!(payload.extra_data(), mock_el_extra_data::()); @@ -4911,13 +4919,14 @@ impl ApiTester { assert_eq!(payload.prev_randao(), expected_prev_randao); // If this cache is populated, it indicates fallback to the local EE was correctly used. - assert!(self - .chain - .execution_layer - .as_ref() - .unwrap() - .get_payload_by_root(&payload.tree_hash_root()) - .is_some()); + assert!( + self.chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some() + ); // another way is to check for the extra data of the local EE assert_eq!(payload.extra_data(), mock_el_extra_data::()); @@ -5001,13 +5010,14 @@ impl ApiTester { assert_eq!(payload.block_number(), expected_block_number); // If this cache is populated, it indicates fallback to the local EE was correctly used. 
- assert!(self - .chain - .execution_layer - .as_ref() - .unwrap() - .get_payload_by_root(&payload.tree_hash_root()) - .is_some()); + assert!( + self.chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some() + ); // another way is to check for the extra data of the local EE assert_eq!(payload.extra_data(), mock_el_extra_data::()); @@ -5090,13 +5100,14 @@ impl ApiTester { assert!(payload.timestamp() > min_expected_timestamp); // If this cache is populated, it indicates fallback to the local EE was correctly used. - assert!(self - .chain - .execution_layer - .as_ref() - .unwrap() - .get_payload_by_root(&payload.tree_hash_root()) - .is_some()); + assert!( + self.chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some() + ); // another way is to check for the extra data of the local EE assert_eq!(payload.extra_data(), mock_el_extra_data::()); @@ -5163,13 +5174,14 @@ impl ApiTester { .into(); // If this cache is populated, it indicates fallback to the local EE was correctly used. - assert!(self - .chain - .execution_layer - .as_ref() - .unwrap() - .get_payload_by_root(&payload.tree_hash_root()) - .is_some()); + assert!( + self.chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some() + ); // another way is to check for the extra data of the local EE assert_eq!(payload.extra_data(), mock_el_extra_data::()); @@ -5226,13 +5238,14 @@ impl ApiTester { .into(); // If this cache is populated, it indicates fallback to the local EE was correctly used. 
- assert!(self - .chain - .execution_layer - .as_ref() - .unwrap() - .get_payload_by_root(&payload.tree_hash_root()) - .is_some()); + assert!( + self.chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some() + ); // another way is to check for the extra data of the local EE assert_eq!(payload.extra_data(), mock_el_extra_data::()); @@ -5302,13 +5315,14 @@ impl ApiTester { .into(); // This cache should not be populated because fallback should not have been used. - assert!(self - .chain - .execution_layer - .as_ref() - .unwrap() - .get_payload_by_root(&payload.tree_hash_root()) - .is_none()); + assert!( + self.chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none() + ); // Another way is to check for the extra data of the mock builder assert_eq!(payload.extra_data(), mock_builder_extra_data::()); @@ -5333,13 +5347,14 @@ impl ApiTester { .into(); // If this cache is populated, it indicates fallback to the local EE was correctly used. - assert!(self - .chain - .execution_layer - .as_ref() - .unwrap() - .get_payload_by_root(&payload.tree_hash_root()) - .is_some()); + assert!( + self.chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some() + ); // another way is to check for the extra data of the local EE assert_eq!(payload.extra_data(), mock_el_extra_data::()); @@ -5441,13 +5456,14 @@ impl ApiTester { .into(); // If this cache is populated, it indicates fallback to the local EE was correctly used. 
- assert!(self - .chain - .execution_layer - .as_ref() - .unwrap() - .get_payload_by_root(&payload.tree_hash_root()) - .is_some()); + assert!( + self.chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some() + ); // another way is to check for the extra data of the local EE assert_eq!(payload.extra_data(), mock_el_extra_data::()); @@ -5482,13 +5498,14 @@ impl ApiTester { .into(); // This cache should not be populated because fallback should not have been used. - assert!(self - .chain - .execution_layer - .as_ref() - .unwrap() - .get_payload_by_root(&payload.tree_hash_root()) - .is_none()); + assert!( + self.chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none() + ); // Another way is to check for the extra data of the mock builder assert_eq!(payload.extra_data(), mock_builder_extra_data::()); @@ -5601,13 +5618,14 @@ impl ApiTester { assert_eq!(payload.fee_recipient(), expected_fee_recipient); // If this cache is populated, it indicates fallback to the local EE was correctly used. 
- assert!(self - .chain - .execution_layer - .as_ref() - .unwrap() - .get_payload_by_root(&payload.tree_hash_root()) - .is_some()); + assert!( + self.chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some() + ); // another way is to check for the extra data of the local EE assert_eq!(payload.extra_data(), mock_el_extra_data::()); @@ -5679,13 +5697,14 @@ impl ApiTester { .into(); // The builder's payload should've been chosen, so this cache should not be populated - assert!(self - .chain - .execution_layer - .as_ref() - .unwrap() - .get_payload_by_root(&payload.tree_hash_root()) - .is_none()); + assert!( + self.chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none() + ); // Another way is to check for the extra data of the mock builder assert_eq!(payload.extra_data(), mock_builder_extra_data::()); @@ -5747,13 +5766,14 @@ impl ApiTester { .into(); // The local payload should've been chosen, so this cache should be populated - assert!(self - .chain - .execution_layer - .as_ref() - .unwrap() - .get_payload_by_root(&payload.tree_hash_root()) - .is_some()); + assert!( + self.chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some() + ); // another way is to check for the extra data of the local EE assert_eq!(payload.extra_data(), mock_el_extra_data::()); @@ -5815,13 +5835,14 @@ impl ApiTester { .into(); // The local payload should've been chosen, so this cache should be populated - assert!(self - .chain - .execution_layer - .as_ref() - .unwrap() - .get_payload_by_root(&payload.tree_hash_root()) - .is_some()); + assert!( + self.chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some() + ); // another way is to check for the extra data of the local EE assert_eq!(payload.extra_data(), mock_el_extra_data::()); @@ -5882,13 +5903,14 @@ impl ApiTester { .into(); 
// The builder's payload should've been chosen, so this cache should not be populated - assert!(self - .chain - .execution_layer - .as_ref() - .unwrap() - .get_payload_by_root(&payload.tree_hash_root()) - .is_none()); + assert!( + self.chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none() + ); // Another way is to check for the extra data of the mock builder assert_eq!(payload.extra_data(), mock_builder_extra_data::()); @@ -5953,13 +5975,14 @@ impl ApiTester { .into(); // The local payload should've been chosen because the builder's was invalid - assert!(self - .chain - .execution_layer - .as_ref() - .unwrap() - .get_payload_by_root(&payload.tree_hash_root()) - .is_some()); + assert!( + self.chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some() + ); self } diff --git a/beacon_node/http_metrics/src/lib.rs b/beacon_node/http_metrics/src/lib.rs index 6cbb485d718..cfa55b54eba 100644 --- a/beacon_node/http_metrics/src/lib.rs +++ b/beacon_node/http_metrics/src/lib.rs @@ -13,7 +13,7 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::path::PathBuf; use std::sync::Arc; use tracing::info; -use warp::{http::Response, Filter}; +use warp::{Filter, http::Response}; #[derive(Debug)] pub enum Error { diff --git a/beacon_node/http_metrics/src/metrics.rs b/beacon_node/http_metrics/src/metrics.rs index bcfb8e4c9cf..dbb0707a904 100644 --- a/beacon_node/http_metrics/src/metrics.rs +++ b/beacon_node/http_metrics/src/metrics.rs @@ -51,10 +51,10 @@ pub fn gather_prometheus_metrics( .encode_utf8(&metrics::gather(), &mut buffer) .unwrap(); // encode gossipsub metrics also if they exist - if let Some(registry) = ctx.gossipsub_registry.as_ref() { - if let Ok(registry_locked) = registry.lock() { - let _ = encode(&mut buffer, ®istry_locked); - } + if let Some(registry) = ctx.gossipsub_registry.as_ref() + && let Ok(registry_locked) = registry.lock() + { + let _ = encode(&mut 
buffer, ®istry_locked); } Ok(buffer) diff --git a/beacon_node/http_metrics/tests/tests.rs b/beacon_node/http_metrics/tests/tests.rs index 2de2fd96f86..2ce21a62b39 100644 --- a/beacon_node/http_metrics/tests/tests.rs +++ b/beacon_node/http_metrics/tests/tests.rs @@ -1,8 +1,8 @@ use beacon_chain::test_utils::EphemeralHarnessType; use http_metrics::Config; use logging::create_test_tracing_subscriber; -use reqwest::header::HeaderValue; use reqwest::StatusCode; +use reqwest::header::HeaderValue; use std::net::{IpAddr, Ipv4Addr}; use std::sync::Arc; use tokio::sync::oneshot; diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 053527f1199..bb3a32daf2d 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -2,11 +2,11 @@ pub use discv5::enr::CombinedKey; -use super::enr_ext::CombinedKeyExt; -use super::enr_ext::{EnrExt, QUIC6_ENR_KEY, QUIC_ENR_KEY}; use super::ENR_FILENAME; -use crate::types::{Enr, EnrAttestationBitfield, EnrSyncCommitteeBitfield}; +use super::enr_ext::CombinedKeyExt; +use super::enr_ext::{EnrExt, QUIC_ENR_KEY, QUIC6_ENR_KEY}; use crate::NetworkConfig; +use crate::types::{Enr, EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use alloy_rlp::bytes::Bytes; use libp2p::identity::Keypair; use lighthouse_version::{client_name, version}; diff --git a/beacon_node/lighthouse_network/src/discovery/enr_ext.rs b/beacon_node/lighthouse_network/src/discovery/enr_ext.rs index bae72356044..1d065ebf4ae 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr_ext.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr_ext.rs @@ -3,7 +3,7 @@ use crate::{Enr, Multiaddr, PeerId}; use discv5::enr::{CombinedKey, CombinedPublicKey}; use libp2p::core::multiaddr::Protocol; -use libp2p::identity::{ed25519, secp256k1, KeyType, Keypair, PublicKey}; +use libp2p::identity::{KeyType, Keypair, PublicKey, ed25519, secp256k1}; use 
tiny_keccak::{Hasher, Keccak}; pub const QUIC_ENR_KEY: &str = "quic"; @@ -164,21 +164,21 @@ impl EnrExt for Enr { fn multiaddr_p2p_tcp(&self) -> Vec { let peer_id = self.peer_id(); let mut multiaddrs: Vec = Vec::new(); - if let Some(ip) = self.ip4() { - if let Some(tcp) = self.tcp4() { - let mut multiaddr: Multiaddr = ip.into(); - multiaddr.push(Protocol::Tcp(tcp)); - multiaddr.push(Protocol::P2p(peer_id)); - multiaddrs.push(multiaddr); - } + if let Some(ip) = self.ip4() + && let Some(tcp) = self.tcp4() + { + let mut multiaddr: Multiaddr = ip.into(); + multiaddr.push(Protocol::Tcp(tcp)); + multiaddr.push(Protocol::P2p(peer_id)); + multiaddrs.push(multiaddr); } - if let Some(ip6) = self.ip6() { - if let Some(tcp6) = self.tcp6() { - let mut multiaddr: Multiaddr = ip6.into(); - multiaddr.push(Protocol::Tcp(tcp6)); - multiaddr.push(Protocol::P2p(peer_id)); - multiaddrs.push(multiaddr); - } + if let Some(ip6) = self.ip6() + && let Some(tcp6) = self.tcp6() + { + let mut multiaddr: Multiaddr = ip6.into(); + multiaddr.push(Protocol::Tcp(tcp6)); + multiaddr.push(Protocol::P2p(peer_id)); + multiaddrs.push(multiaddr); } multiaddrs } @@ -190,21 +190,21 @@ impl EnrExt for Enr { fn multiaddr_p2p_udp(&self) -> Vec { let peer_id = self.peer_id(); let mut multiaddrs: Vec = Vec::new(); - if let Some(ip) = self.ip4() { - if let Some(udp) = self.udp4() { - let mut multiaddr: Multiaddr = ip.into(); - multiaddr.push(Protocol::Udp(udp)); - multiaddr.push(Protocol::P2p(peer_id)); - multiaddrs.push(multiaddr); - } + if let Some(ip) = self.ip4() + && let Some(udp) = self.udp4() + { + let mut multiaddr: Multiaddr = ip.into(); + multiaddr.push(Protocol::Udp(udp)); + multiaddr.push(Protocol::P2p(peer_id)); + multiaddrs.push(multiaddr); } - if let Some(ip6) = self.ip6() { - if let Some(udp6) = self.udp6() { - let mut multiaddr: Multiaddr = ip6.into(); - multiaddr.push(Protocol::Udp(udp6)); - multiaddr.push(Protocol::P2p(peer_id)); - multiaddrs.push(multiaddr); - } + if let Some(ip6) = 
self.ip6() + && let Some(udp6) = self.udp6() + { + let mut multiaddr: Multiaddr = ip6.into(); + multiaddr.push(Protocol::Udp(udp6)); + multiaddr.push(Protocol::P2p(peer_id)); + multiaddrs.push(multiaddr); } multiaddrs } @@ -212,22 +212,22 @@ impl EnrExt for Enr { /// Returns a list of multiaddrs if the ENR has an `ip` and a `quic` key **or** an `ip6` and a `quic6`. fn multiaddr_quic(&self) -> Vec { let mut multiaddrs: Vec = Vec::new(); - if let Some(quic_port) = self.quic4() { - if let Some(ip) = self.ip4() { - let mut multiaddr: Multiaddr = ip.into(); - multiaddr.push(Protocol::Udp(quic_port)); - multiaddr.push(Protocol::QuicV1); - multiaddrs.push(multiaddr); - } + if let Some(quic_port) = self.quic4() + && let Some(ip) = self.ip4() + { + let mut multiaddr: Multiaddr = ip.into(); + multiaddr.push(Protocol::Udp(quic_port)); + multiaddr.push(Protocol::QuicV1); + multiaddrs.push(multiaddr); } - if let Some(quic6_port) = self.quic6() { - if let Some(ip6) = self.ip6() { - let mut multiaddr: Multiaddr = ip6.into(); - multiaddr.push(Protocol::Udp(quic6_port)); - multiaddr.push(Protocol::QuicV1); - multiaddrs.push(multiaddr); - } + if let Some(quic6_port) = self.quic6() + && let Some(ip6) = self.ip6() + { + let mut multiaddr: Multiaddr = ip6.into(); + multiaddr.push(Protocol::Udp(quic6_port)); + multiaddr.push(Protocol::QuicV1); + multiaddrs.push(multiaddr); } multiaddrs } @@ -235,19 +235,19 @@ impl EnrExt for Enr { /// Returns a list of multiaddrs if the ENR has an `ip` and either a `tcp` or `udp` key **or** an `ip6` and either a `tcp6` or `udp6`. 
fn multiaddr_tcp(&self) -> Vec { let mut multiaddrs: Vec = Vec::new(); - if let Some(ip) = self.ip4() { - if let Some(tcp) = self.tcp4() { - let mut multiaddr: Multiaddr = ip.into(); - multiaddr.push(Protocol::Tcp(tcp)); - multiaddrs.push(multiaddr); - } + if let Some(ip) = self.ip4() + && let Some(tcp) = self.tcp4() + { + let mut multiaddr: Multiaddr = ip.into(); + multiaddr.push(Protocol::Tcp(tcp)); + multiaddrs.push(multiaddr); } - if let Some(ip6) = self.ip6() { - if let Some(tcp6) = self.tcp6() { - let mut multiaddr: Multiaddr = ip6.into(); - multiaddr.push(Protocol::Tcp(tcp6)); - multiaddrs.push(multiaddr); - } + if let Some(ip6) = self.ip6() + && let Some(tcp6) = self.tcp6() + { + let mut multiaddr: Multiaddr = ip6.into(); + multiaddr.push(Protocol::Tcp(tcp6)); + multiaddrs.push(multiaddr); } multiaddrs } diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index bc7802ce9aa..2d471538093 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -8,11 +8,11 @@ pub mod enr_ext; // Allow external use of the lighthouse ENR builder use crate::service::TARGET_SUBNET_PEERS; -use crate::{metrics, ClearDialError}; +use crate::{ClearDialError, metrics}; use crate::{Enr, NetworkConfig, NetworkGlobals, Subnet, SubnetDiscovery}; -use discv5::{enr::NodeId, Discv5}; -pub use enr::{build_enr, load_enr_from_disk, use_or_load_enr, CombinedKey, Eth2Enr}; -pub use enr_ext::{peer_id_to_node_id, CombinedKeyExt, EnrExt}; +use discv5::{Discv5, enr::NodeId}; +pub use enr::{CombinedKey, Eth2Enr, build_enr, load_enr_from_disk, use_or_load_enr}; +pub use enr_ext::{CombinedKeyExt, EnrExt, peer_id_to_node_id}; pub use libp2p::identity::{Keypair, PublicKey}; use alloy_rlp::bytes::Bytes; @@ -21,14 +21,14 @@ use futures::prelude::*; use futures::stream::FuturesUnordered; use libp2p::core::transport::PortUse; use libp2p::multiaddr::Protocol; -use 
libp2p::swarm::behaviour::{DialFailure, FromSwarm}; use libp2p::swarm::THandlerInEvent; +use libp2p::swarm::behaviour::{DialFailure, FromSwarm}; pub use libp2p::{ - core::{transport::ListenerId, ConnectedPoint, Multiaddr}, + core::{ConnectedPoint, Multiaddr, transport::ListenerId}, identity::PeerId, swarm::{ - dummy::ConnectionHandler, ConnectionId, DialError, NetworkBehaviour, NotifyHandler, - SubstreamProtocol, ToSwarm, + ConnectionId, DialError, NetworkBehaviour, NotifyHandler, SubstreamProtocol, ToSwarm, + dummy::ConnectionHandler, }, }; use logging::crit; @@ -1132,7 +1132,10 @@ impl NetworkBehaviour for Discovery { self.update_enr_quic_port(port, false) } _ => { - debug!(?addr, "Encountered unacceptable multiaddr for listening (unsupported transport)"); + debug!( + ?addr, + "Encountered unacceptable multiaddr for listening (unsupported transport)" + ); return; } }, @@ -1154,7 +1157,10 @@ impl NetworkBehaviour for Discovery { self.update_enr_quic_port(port, true) } _ => { - debug!(?addr, "Encountered unacceptable multiaddr for listening (unsupported transport)"); + debug!( + ?addr, + "Encountered unacceptable multiaddr for listening (unsupported transport)" + ); return; } }, diff --git a/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs b/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs index 735ef5b0f28..30f42f41628 100644 --- a/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs +++ b/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs @@ -3,8 +3,8 @@ use super::*; use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use std::ops::Deref; use tracing::trace; -use types::data_column_custody_group::compute_subnets_for_node; use types::ChainSpec; +use types::data_column_custody_group::compute_subnets_for_node; /// Returns the predicate for a given subnet. 
pub fn subnet_predicate( diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs index 96853ea57cb..5c4a4586508 100644 --- a/beacon_node/lighthouse_network/src/lib.rs +++ b/beacon_node/lighthouse_network/src/lib.rs @@ -15,7 +15,7 @@ pub mod types; use libp2p::swarm::DialError; pub use listen_addr::*; -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; +use serde::{Deserialize, Deserializer, Serialize, Serializer, de}; use std::str::FromStr; /// Wrapper over a libp2p `PeerId` which implements `Serialize` and `Deserialize` @@ -111,14 +111,14 @@ pub use discovery::{CombinedKeyExt, EnrExt, Eth2Enr}; pub use discv5; pub use gossipsub::{IdentTopic, MessageAcceptance, MessageId, Topic, TopicHash}; pub use libp2p; -pub use libp2p::{core::ConnectedPoint, PeerId, Swarm}; -pub use libp2p::{multiaddr, Multiaddr}; +pub use libp2p::{Multiaddr, multiaddr}; +pub use libp2p::{PeerId, Swarm, core::ConnectedPoint}; pub use metrics::scrape_discovery_metrics; pub use peer_manager::{ + ConnectionDirection, PeerConnectionStatus, PeerInfo, PeerManager, SyncInfo, SyncStatus, + peerdb::PeerDB, peerdb::client::Client, peerdb::score::{PeerAction, ReportSource}, - peerdb::PeerDB, - ConnectionDirection, PeerConnectionStatus, PeerInfo, PeerManager, SyncInfo, SyncStatus, }; // pub use service::{load_private_key, Context, Libp2pEvent, Service, NETWORK_KEY_FILENAME}; pub use service::api_types::Response; diff --git a/beacon_node/lighthouse_network/src/listen_addr.rs b/beacon_node/lighthouse_network/src/listen_addr.rs index 3b0ff98b34f..85232c0b359 100644 --- a/beacon_node/lighthouse_network/src/listen_addr.rs +++ b/beacon_node/lighthouse_network/src/listen_addr.rs @@ -1,6 +1,6 @@ use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; -use libp2p::{multiaddr::Protocol, Multiaddr}; +use libp2p::{Multiaddr, multiaddr::Protocol}; use serde::{Deserialize, Serialize}; /// A listening address composed by an Ip, an UDP port and a TCP port. 
diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 01cc1611058..13367a3e997 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -4,7 +4,7 @@ use crate::discovery::enr_ext::EnrExt; use crate::discovery::peer_id_to_node_id; use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RpcErrorResponse}; use crate::service::TARGET_SUBNET_PEERS; -use crate::{metrics, Gossipsub, NetworkGlobals, PeerId, Subnet, SubnetDiscovery}; +use crate::{Gossipsub, NetworkGlobals, PeerId, Subnet, SubnetDiscovery, metrics}; use delay_map::HashSetDelay; use discv5::Enr; use libp2p::identify::Info as IdentifyInfo; @@ -31,11 +31,11 @@ pub use peerdb::peer_info::{ }; use peerdb::score::{PeerAction, ReportSource}; pub use peerdb::sync_status::{SyncInfo, SyncStatus}; -use std::collections::{hash_map::Entry, HashMap, HashSet}; +use std::collections::{HashMap, HashSet, hash_map::Entry}; use std::net::IpAddr; use strum::IntoEnumIterator; use types::data_column_custody_group::{ - compute_subnets_from_custody_group, get_custody_groups, CustodyIndex, + CustodyIndex, compute_subnets_from_custody_group, get_custody_groups, }; pub mod config; @@ -1141,7 +1141,7 @@ impl PeerManager { if !peers_on_subnet.is_empty() { // Order the peers by the number of subnets they are long-lived // subscribed too, shuffle equal peers. - peers_on_subnet.shuffle(&mut rand::thread_rng()); + peers_on_subnet.shuffle(&mut rand::rng()); peers_on_subnet.sort_by_key(|(_, info)| info.long_lived_subnet_count()); // Try and find a candidate peer to remove from the subnet. 
@@ -1525,8 +1525,8 @@ enum ConnectingType { #[cfg(test)] mod tests { use super::*; - use crate::rpc::MetaDataV3; use crate::NetworkConfig; + use crate::rpc::MetaDataV3; use types::{ChainSpec, ForkName, MainnetEthSpec as E}; async fn build_peer_manager(target_peer_count: usize) -> PeerManager { @@ -1619,32 +1619,40 @@ mod tests { // Check that one outbound-only peer was removed because it had the worst score // and that we did not disconnect the other outbound peer due to the minimum outbound quota. assert_eq!(peer_manager.network_globals.connected_or_dialing_peers(), 3); - assert!(peer_manager - .network_globals - .peers - .read() - .is_connected(&outbound_only_peer1)); - assert!(!peer_manager - .network_globals - .peers - .read() - .is_connected(&outbound_only_peer2)); + assert!( + peer_manager + .network_globals + .peers + .read() + .is_connected(&outbound_only_peer1) + ); + assert!( + !peer_manager + .network_globals + .peers + .read() + .is_connected(&outbound_only_peer2) + ); // The trusted peer remains connected - assert!(peer_manager - .network_globals - .peers - .read() - .is_connected(&trusted_peer)); + assert!( + peer_manager + .network_globals + .peers + .read() + .is_connected(&trusted_peer) + ); peer_manager.heartbeat(); // The trusted peer remains connected, even after subsequent heartbeats. - assert!(peer_manager - .network_globals - .peers - .read() - .is_connected(&trusted_peer)); + assert!( + peer_manager + .network_globals + .peers + .read() + .is_connected(&trusted_peer) + ); // Check that if we are at target number of peers, we do not disconnect any. 
assert_eq!(peer_manager.network_globals.connected_or_dialing_peers(), 3); @@ -1956,13 +1964,7 @@ mod tests { // id mod % 4 // except for the last 5 peers which all go on their own subnets // So subnets 0-2 should have 4 peers subnet 3 should have 3 and 15-19 should have 1 - let subnet: u64 = { - if x < 15 { - x % 4 - } else { - x - } - }; + let subnet: u64 = { if x < 15 { x % 4 } else { x } }; let peer = PeerId::random(); peer_manager.inject_connect_ingoing(&peer, "/ip4/0.0.0.0".parse().unwrap(), None); diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index a45b941e58d..43d9b90d8dc 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -4,21 +4,21 @@ use std::net::IpAddr; use std::task::{Context, Poll}; use futures::StreamExt; -use libp2p::core::transport::PortUse; use libp2p::core::ConnectedPoint; +use libp2p::core::transport::PortUse; use libp2p::identity::PeerId; use libp2p::multiaddr::Protocol; use libp2p::swarm::behaviour::{ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}; use libp2p::swarm::dial_opts::{DialOpts, PeerCondition}; use libp2p::swarm::dummy::ConnectionHandler; use libp2p::swarm::{ConnectionDenied, ConnectionId, NetworkBehaviour, ToSwarm}; -pub use metrics::{set_gauge_vec, NAT_OPEN}; +pub use metrics::{NAT_OPEN, set_gauge_vec}; use tracing::{debug, error, trace}; use types::EthSpec; use crate::discovery::enr_ext::EnrExt; use crate::types::SyncState; -use crate::{metrics, ClearDialError}; +use crate::{ClearDialError, metrics}; use super::{ConnectingType, PeerManager, PeerManagerEvent}; @@ -172,7 +172,7 @@ impl NetworkBehaviour for PeerManager { _ => { return Err(ConnectionDenied::new(format!( "Connection to peer rejected: invalid multiaddr: {remote_addr}" - ))) + ))); } }; @@ -340,10 +340,10 @@ impl PeerManager { /// connects 
and the dial attempt later fails. To handle this, we only update the peer_db if /// the peer is not already connected. fn on_dial_failure(&mut self, peer_id: Option) { - if let Some(peer_id) = peer_id { - if !self.network_globals.peers.read().is_connected(&peer_id) { - self.inject_disconnect(&peer_id); - } + if let Some(peer_id) = peer_id + && !self.network_globals.peers.read().is_connected(&peer_id) + { + self.inject_disconnect(&peer_id); } } } diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index 7dd4e6544d8..430ad2f6dae 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -1,7 +1,7 @@ use crate::discovery::enr::PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY; -use crate::discovery::{peer_id_to_node_id, CombinedKey}; +use crate::discovery::{CombinedKey, peer_id_to_node_id}; use crate::{ - metrics, multiaddr::Multiaddr, types::Subnet, Enr, EnrExt, Gossipsub, PeerId, SyncInfo, + Enr, EnrExt, Gossipsub, PeerId, SyncInfo, metrics, multiaddr::Multiaddr, types::Subnet, }; use itertools::Itertools; use logging::crit; @@ -11,7 +11,7 @@ use std::net::IpAddr; use std::time::Instant; use std::{cmp::Ordering, fmt::Display}; use std::{ - collections::{hash_map::Entry, HashMap, HashSet}, + collections::{HashMap, HashSet, hash_map::Entry}, fmt::Formatter, }; use sync_status::SyncStatus; @@ -431,12 +431,11 @@ impl PeerDB { .peers .iter() .filter_map(|(peer_id, info)| { - if let PeerConnectionStatus::Dialing { since } = info.connection_status() { - if (*since) + std::time::Duration::from_secs(DIAL_TIMEOUT) + if let PeerConnectionStatus::Dialing { since } = info.connection_status() + && (*since) + std::time::Duration::from_secs(DIAL_TIMEOUT) < std::time::Instant::now() - { - return Some(*peer_id); - } + { + return Some(*peer_id); } None }) diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs 
b/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs index 9450584d6fc..5e761f90a95 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs @@ -127,12 +127,12 @@ fn client_from_agent_version(agent_version: &str) -> (ClientKind, String, String } Some("teku") => { let kind = ClientKind::Teku; - if agent_split.next().is_some() { - if let Some(agent_version) = agent_split.next() { - version = agent_version.into(); - if let Some(agent_os_version) = agent_split.next() { - os_version = agent_os_version.into(); - } + if agent_split.next().is_some() + && let Some(agent_version) = agent_split.next() + { + version = agent_version.into(); + if let Some(agent_os_version) = agent_split.next() { + os_version = agent_os_version.into(); } } (kind, version, os_version) @@ -143,24 +143,24 @@ fn client_from_agent_version(agent_version: &str) -> (ClientKind, String, String } Some("Prysm") => { let kind = ClientKind::Prysm; - if agent_split.next().is_some() { - if let Some(agent_version) = agent_split.next() { - version = agent_version.into(); - if let Some(agent_os_version) = agent_split.next() { - os_version = agent_os_version.into(); - } + if agent_split.next().is_some() + && let Some(agent_version) = agent_split.next() + { + version = agent_version.into(); + if let Some(agent_os_version) = agent_split.next() { + os_version = agent_os_version.into(); } } (kind, version, os_version) } Some("nimbus") => { let kind = ClientKind::Nimbus; - if agent_split.next().is_some() { - if let Some(agent_version) = agent_split.next() { - version = agent_version.into(); - if let Some(agent_os_version) = agent_split.next() { - os_version = agent_os_version.into(); - } + if agent_split.next().is_some() + && let Some(agent_version) = agent_split.next() + { + version = agent_version.into(); + if let Some(agent_os_version) = agent_split.next() { + os_version = agent_os_version.into(); } } (kind, 
version, os_version) diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs index 4c47df63437..e643fca30fb 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs @@ -3,19 +3,19 @@ use super::score::{PeerAction, Score, ScoreState}; use super::sync_status::SyncStatus; use crate::discovery::Eth2Enr; use crate::{rpc::MetaData, types::Subnet}; +use PeerConnectionStatus::*; use discv5::Enr; use eth2::types::{PeerDirection, PeerState}; use libp2p::core::multiaddr::{Multiaddr, Protocol}; use serde::{ - ser::{SerializeStruct, Serializer}, Serialize, + ser::{SerializeStruct, Serializer}, }; use std::collections::HashSet; use std::net::IpAddr; use std::time::Instant; use strum::AsRefStr; use types::{DataColumnSubnetId, EthSpec}; -use PeerConnectionStatus::*; /// Information about a given connected peer. #[derive(Clone, Debug, Serialize)] @@ -95,15 +95,15 @@ impl PeerInfo { if let Some(meta_data) = &self.meta_data { match subnet { Subnet::Attestation(id) => { - return meta_data.attnets().get(**id as usize).unwrap_or(false) + return meta_data.attnets().get(**id as usize).unwrap_or(false); } Subnet::SyncCommittee(id) => { return meta_data .syncnets() - .is_ok_and(|s| s.get(**id as usize).unwrap_or(false)) + .is_ok_and(|s| s.get(**id as usize).unwrap_or(false)); } Subnet::DataColumn(subnet_id) => { - return self.is_assigned_to_custody_subnet(subnet_id) + return self.is_assigned_to_custody_subnet(subnet_id); } } } @@ -179,10 +179,10 @@ impl PeerInfo { pub fn long_lived_subnet_count(&self) -> usize { if let Some(meta_data) = self.meta_data.as_ref() { return meta_data.attnets().num_set_bits(); - } else if let Some(enr) = self.enr.as_ref() { - if let Ok(attnets) = enr.attestation_bitfield::() { - return attnets.num_set_bits(); - } + } else if let Some(enr) = self.enr.as_ref() + && let 
Ok(attnets) = enr.attestation_bitfield::() + { + return attnets.num_set_bits(); } 0 } @@ -247,20 +247,20 @@ impl PeerInfo { if !meta_data.attnets().is_zero() && !self.subnets.is_empty() { return true; } - if let Ok(sync) = meta_data.syncnets() { - if !sync.is_zero() { - return true; - } + if let Ok(sync) = meta_data.syncnets() + && !sync.is_zero() + { + return true; } } // We may not have the metadata but may have an ENR. Lets check that - if let Some(enr) = self.enr.as_ref() { - if let Ok(attnets) = enr.attestation_bitfield::() { - if !attnets.is_zero() && !self.subnets.is_empty() { - return true; - } - } + if let Some(enr) = self.enr.as_ref() + && let Ok(attnets) = enr.attestation_bitfield::() + && !attnets.is_zero() + && !self.subnets.is_empty() + { + return true; } false } diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs index 995ebf90646..e57e7907db7 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs @@ -332,11 +332,7 @@ impl Score { Some(v) => { // Only reverse when none of the items is NAN, // so that NAN's are never considered. 
- if reverse { - v.reverse() - } else { - v - } + if reverse { v.reverse() } else { v } } None if self.score().is_nan() && !other.score().is_nan() => Ordering::Less, None if !self.score().is_nan() && other.score().is_nan() => Ordering::Greater, diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index d01b3b76ca1..0a468d1ca60 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -1,8 +1,8 @@ +use crate::rpc::RequestType; use crate::rpc::methods::*; use crate::rpc::protocol::{ - Encoding, ProtocolId, RPCError, SupportedProtocol, ERROR_TYPE_MAX, ERROR_TYPE_MIN, + ERROR_TYPE_MAX, ERROR_TYPE_MIN, Encoding, ProtocolId, RPCError, SupportedProtocol, }; -use crate::rpc::RequestType; use libp2p::bytes::BufMut; use libp2p::bytes::BytesMut; use snap::read::FrameDecoder; @@ -467,12 +467,12 @@ fn context_bytes( resp: &RpcResponse, ) -> Option<[u8; CONTEXT_BYTES_LEN]> { // Add the context bytes if required - if protocol.has_context_bytes() { - if let RpcResponse::Success(rpc_variant) = resp { - return rpc_variant - .slot() - .map(|slot| fork_context.context_bytes(slot.epoch(E::slots_per_epoch()))); - } + if protocol.has_context_bytes() + && let RpcResponse::Success(rpc_variant) = resp + { + return rpc_variant + .slot() + .map(|slot| fork_context.context_bytes(slot.epoch(E::slots_per_epoch()))); } None } @@ -902,10 +902,10 @@ mod tests { use crate::rpc::protocol::*; use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use types::{ - blob_sidecar::BlobIdentifier, data_column_sidecar::Cell, BeaconBlock, BeaconBlockAltair, - BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockHeader, DataColumnsByRootIdentifier, - EmptyBlock, Epoch, FixedBytesExtended, FullPayload, KzgCommitment, KzgProof, Signature, - SignedBeaconBlockHeader, Slot, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockHeader, + 
DataColumnsByRootIdentifier, EmptyBlock, Epoch, FixedBytesExtended, FullPayload, + KzgCommitment, KzgProof, Signature, SignedBeaconBlockHeader, Slot, + blob_sidecar::BlobIdentifier, data_column_sidecar::Cell, }; type Spec = types::MainnetEthSpec; @@ -1903,13 +1903,15 @@ mod tests { .unwrap(), ); - assert!(decode_response( - SupportedProtocol::MetaDataV2, - &mut encoded_bytes, - ForkName::Altair, - &chain_spec, - ) - .is_err()); + assert!( + decode_response( + SupportedProtocol::MetaDataV2, + &mut encoded_bytes, + ForkName::Altair, + &chain_spec, + ) + .is_err() + ); // Sending context bytes which do not correspond to any fork should return an error let mut encoded_bytes = encode_response( diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index 7a746a63e18..ef9347a1197 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -1,4 +1,4 @@ -use super::{rate_limiter::Quota, Protocol}; +use super::{Protocol, rate_limiter::Quota}; use std::num::NonZeroU64; use std::{ fmt::{Debug, Display}, diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index fe7be936622..972d45cdfeb 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -8,25 +8,25 @@ use super::{RPCReceived, RPCSend, ReqId}; use crate::rpc::outbound::OutboundFramed; use crate::rpc::protocol::InboundFramed; use fnv::FnvHashMap; -use futures::prelude::*; use futures::SinkExt; +use futures::prelude::*; +use libp2p::PeerId; use libp2p::swarm::handler::{ ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, StreamUpgradeError, SubstreamProtocol, }; use libp2p::swarm::{ConnectionId, Stream}; -use libp2p::PeerId; use logging::crit; use smallvec::SmallVec; use std::{ - collections::{hash_map::Entry, 
VecDeque}, + collections::{VecDeque, hash_map::Entry}, pin::Pin, sync::Arc, task::{Context, Poll}, time::{Duration, Instant}, }; -use tokio::time::{sleep, Sleep}; -use tokio_util::time::{delay_queue, DelayQueue}; +use tokio::time::{Sleep, sleep}; +use tokio_util::time::{DelayQueue, delay_queue}; use tracing::{debug, trace}; use types::{EthSpec, ForkContext, Slot}; @@ -848,23 +848,22 @@ where } // Check if we have completed sending a goodbye, disconnect. - if let HandlerState::ShuttingDown(_) = self.state { - if self.dial_queue.is_empty() - && self.outbound_substreams.is_empty() - && self.inbound_substreams.is_empty() - && self.events_out.is_empty() - && self.dial_negotiated == 0 - { - debug!( - peer_id = %self.peer_id, - connection_id = %self.connection_id, - "Goodbye sent, Handler deactivated" - ); - self.state = HandlerState::Deactivated; - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - HandlerEvent::Close(RPCError::Disconnected), - )); - } + if let HandlerState::ShuttingDown(_) = self.state + && self.dial_queue.is_empty() + && self.outbound_substreams.is_empty() + && self.inbound_substreams.is_empty() + && self.events_out.is_empty() + && self.dial_negotiated == 0 + { + debug!( + peer_id = %self.peer_id, + connection_id = %self.connection_id, + "Goodbye sent, Handler deactivated" + ); + self.state = HandlerState::Deactivated; + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::Close(RPCError::Disconnected), + )); } Poll::Pending diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 53005448211..180130a2bf4 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -5,7 +5,7 @@ use regex::bytes::Regex; use serde::Serialize; use ssz::Encode; use ssz_derive::{Decode, Encode}; -use ssz_types::{typenum::U256, VariableList}; +use ssz_types::{VariableList, typenum::U256}; use std::fmt::Display; use 
std::marker::PhantomData; use std::ops::Deref; @@ -15,10 +15,10 @@ use superstruct::superstruct; use types::blob_sidecar::BlobIdentifier; use types::light_client_update::MAX_REQUEST_LIGHT_CLIENT_UPDATES; use types::{ - blob_sidecar::BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecar, - DataColumnsByRootIdentifier, Epoch, EthSpec, ForkContext, Hash256, LightClientBootstrap, - LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, RuntimeVariableList, - SignedBeaconBlock, Slot, + ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnsByRootIdentifier, Epoch, EthSpec, + ForkContext, Hash256, LightClientBootstrap, LightClientFinalityUpdate, + LightClientOptimisticUpdate, LightClientUpdate, RuntimeVariableList, SignedBeaconBlock, Slot, + blob_sidecar::BlobSidecar, }; /// Maximum length of error message. @@ -784,7 +784,16 @@ impl std::fmt::Display for RpcErrorResponse { impl std::fmt::Display for StatusMessage { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "Status Message: Fork Digest: {:?}, Finalized Root: {}, Finalized Epoch: {}, Head Root: {}, Head Slot: {}, Earliest available slot: {:?}", self.fork_digest(), self.finalized_root(), self.finalized_epoch(), self.head_root(), self.head_slot(), self.earliest_available_slot()) + write!( + f, + "Status Message: Fork Digest: {:?}, Finalized Root: {}, Finalized Epoch: {}, Head Root: {}, Head Slot: {}, Earliest available slot: {:?}", + self.fork_digest(), + self.finalized_root(), + self.finalized_epoch(), + self.head_root(), + self.head_slot(), + self.earliest_available_slot() + ) } } diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index a086392c47c..5e8e55891c6 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -5,13 +5,13 @@ //! syncing. 
use handler::RPCHandler; +use libp2p::PeerId; use libp2p::core::transport::PortUse; use libp2p::swarm::{ - handler::ConnectionHandler, CloseConnection, ConnectionId, NetworkBehaviour, NotifyHandler, - ToSwarm, + CloseConnection, ConnectionId, NetworkBehaviour, NotifyHandler, ToSwarm, + handler::ConnectionHandler, }; use libp2p::swarm::{ConnectionClosed, FromSwarm, SubstreamProtocol, THandlerInEvent}; -use libp2p::PeerId; use std::collections::HashMap; use std::marker::PhantomData; use std::sync::Arc; @@ -247,17 +247,17 @@ impl RPC { request_id: InboundRequestId, response: RpcResponse, ) { - if let Some(response_limiter) = self.response_limiter.as_mut() { - if !response_limiter.allows( + if let Some(response_limiter) = self.response_limiter.as_mut() + && !response_limiter.allows( peer_id, protocol, request_id.connection_id, request_id.substream_id, response.clone(), - ) { - // Response is logged and queued internally in the response limiter. - return; - } + ) + { + // Response is logged and queued internally in the response limiter. 
+ return; } self.events.push(ToSwarm::NotifyHandler { @@ -564,15 +564,15 @@ where } fn poll(&mut self, cx: &mut Context) -> Poll>> { - if let Some(response_limiter) = self.response_limiter.as_mut() { - if let Poll::Ready(responses) = response_limiter.poll_ready(cx) { - for response in responses { - self.events.push(ToSwarm::NotifyHandler { - peer_id: response.peer_id, - handler: NotifyHandler::One(response.connection_id), - event: RPCSend::Response(response.substream_id, response.response), - }); - } + if let Some(response_limiter) = self.response_limiter.as_mut() + && let Poll::Ready(responses) = response_limiter.poll_ready(cx) + { + for response in responses { + self.events.push(ToSwarm::NotifyHandler { + peer_id: response.peer_id, + handler: NotifyHandler::One(response.connection_id), + event: RPCSend::Response(response.substream_id, response.response), + }); } } diff --git a/beacon_node/lighthouse_network/src/rpc/outbound.rs b/beacon_node/lighthouse_network/src/rpc/outbound.rs index b614313a84b..3fbc279d005 100644 --- a/beacon_node/lighthouse_network/src/rpc/outbound.rs +++ b/beacon_node/lighthouse_network/src/rpc/outbound.rs @@ -1,6 +1,6 @@ -use super::protocol::ProtocolId; use super::RPCError; use super::RequestType; +use super::protocol::ProtocolId; use crate::rpc::codec::SSZSnappyOutboundCodec; use crate::rpc::protocol::Encoding; use futures::future::BoxFuture; diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 500e98d5c33..388dbe63ef0 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -1030,7 +1030,7 @@ impl RPCError { /// Used for metrics. pub fn as_static_str(&self) -> &'static str { match self { - RPCError::ErrorResponse(ref code, ..) => code.into(), + RPCError::ErrorResponse(code, ..) 
=> code.into(), e => e.into(), } } diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index f8fd54eb2a9..65cd1c2e61e 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -506,25 +506,37 @@ mod tests { // | | | | | // 0 1 2 - assert!(limiter - .allows(Duration::from_secs_f32(0.0), &key, 4) - .is_ok()); + assert!( + limiter + .allows(Duration::from_secs_f32(0.0), &key, 4) + .is_ok() + ); limiter.prune(Duration::from_secs_f32(0.1)); - assert!(limiter - .allows(Duration::from_secs_f32(0.1), &key, 1) - .is_err()); - assert!(limiter - .allows(Duration::from_secs_f32(0.5), &key, 1) - .is_ok()); - assert!(limiter - .allows(Duration::from_secs_f32(1.0), &key, 1) - .is_ok()); - assert!(limiter - .allows(Duration::from_secs_f32(1.4), &key, 1) - .is_err()); - assert!(limiter - .allows(Duration::from_secs_f32(2.0), &key, 2) - .is_ok()); + assert!( + limiter + .allows(Duration::from_secs_f32(0.1), &key, 1) + .is_err() + ); + assert!( + limiter + .allows(Duration::from_secs_f32(0.5), &key, 1) + .is_ok() + ); + assert!( + limiter + .allows(Duration::from_secs_f32(1.0), &key, 1) + .is_ok() + ); + assert!( + limiter + .allows(Duration::from_secs_f32(1.4), &key, 1) + .is_err() + ); + assert!( + limiter + .allows(Duration::from_secs_f32(2.0), &key, 2) + .is_ok() + ); } #[test] @@ -539,21 +551,31 @@ mod tests { // first half second, when one token will be available again. 
Check also that before // regaining a token, another request is rejected - assert!(limiter - .allows(Duration::from_secs_f32(0.0), &key, 1) - .is_ok()); - assert!(limiter - .allows(Duration::from_secs_f32(0.1), &key, 1) - .is_ok()); - assert!(limiter - .allows(Duration::from_secs_f32(0.2), &key, 1) - .is_ok()); - assert!(limiter - .allows(Duration::from_secs_f32(0.3), &key, 1) - .is_ok()); - assert!(limiter - .allows(Duration::from_secs_f32(0.4), &key, 1) - .is_err()); + assert!( + limiter + .allows(Duration::from_secs_f32(0.0), &key, 1) + .is_ok() + ); + assert!( + limiter + .allows(Duration::from_secs_f32(0.1), &key, 1) + .is_ok() + ); + assert!( + limiter + .allows(Duration::from_secs_f32(0.2), &key, 1) + .is_ok() + ); + assert!( + limiter + .allows(Duration::from_secs_f32(0.3), &key, 1) + .is_ok() + ); + assert!( + limiter + .allows(Duration::from_secs_f32(0.4), &key, 1) + .is_err() + ); } #[test] diff --git a/beacon_node/lighthouse_network/src/rpc/response_limiter.rs b/beacon_node/lighthouse_network/src/rpc/response_limiter.rs index c583baaadd1..bd3035f89c2 100644 --- a/beacon_node/lighthouse_network/src/rpc/response_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/response_limiter.rs @@ -1,8 +1,8 @@ +use crate::PeerId; use crate::rpc::config::InboundRateLimiterConfig; use crate::rpc::rate_limiter::{RPCRateLimiter, RateLimitedErr}; use crate::rpc::self_limiter::timestamp_now; use crate::rpc::{Protocol, RpcResponse, SubstreamId}; -use crate::PeerId; use futures::FutureExt; use libp2p::swarm::ConnectionId; use logging::crit; diff --git a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs index f26dc4c7a84..90e2db91357 100644 --- a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs @@ -1,19 +1,19 @@ use super::{ + BehaviourAction, MAX_CONCURRENT_REQUESTS, Protocol, RPCSend, ReqId, RequestType, config::OutboundRateLimiterConfig, 
rate_limiter::{RPCRateLimiter as RateLimiter, RateLimitedErr}, - BehaviourAction, Protocol, RPCSend, ReqId, RequestType, MAX_CONCURRENT_REQUESTS, }; use crate::rpc::rate_limiter::RateLimiterItem; use std::time::{SystemTime, UNIX_EPOCH}; use std::{ - collections::{hash_map::Entry, HashMap, VecDeque}, + collections::{HashMap, VecDeque, hash_map::Entry}, sync::Arc, task::{Context, Poll}, time::Duration, }; use futures::FutureExt; -use libp2p::{swarm::NotifyHandler, PeerId}; +use libp2p::{PeerId, swarm::NotifyHandler}; use logging::crit; use smallvec::SmallVec; use tokio_util::time::DelayQueue; @@ -130,24 +130,23 @@ impl SelfRateLimiter { request_id: Id, req: RequestType, ) -> Result, (QueuedRequest, Duration)> { - if let Some(active_request) = active_requests.get(&peer_id) { - if let Some(count) = active_request.get(&req.protocol()) { - if *count >= MAX_CONCURRENT_REQUESTS { - debug!( - %peer_id, - protocol = %req.protocol(), - "Self rate limiting due to the number of concurrent requests" - ); - return Err(( - QueuedRequest { - req, - request_id, - queued_at: timestamp_now(), - }, - Duration::from_millis(WAIT_TIME_DUE_TO_CONCURRENT_REQUESTS), - )); - } - } + if let Some(active_request) = active_requests.get(&peer_id) + && let Some(count) = active_request.get(&req.protocol()) + && *count >= MAX_CONCURRENT_REQUESTS + { + debug!( + %peer_id, + protocol = %req.protocol(), + "Self rate limiting due to the number of concurrent requests" + ); + return Err(( + QueuedRequest { + req, + request_id, + queued_at: timestamp_now(), + }, + Duration::from_millis(WAIT_TIME_DUE_TO_CONCURRENT_REQUESTS), + )); } if let Some(limiter) = rate_limiter.as_mut() { @@ -258,13 +257,13 @@ impl SelfRateLimiter { /// Informs the limiter that a response has been received. 
pub fn request_completed(&mut self, peer_id: &PeerId, protocol: Protocol) { - if let Some(active_requests) = self.active_requests.get_mut(peer_id) { - if let Entry::Occupied(mut entry) = active_requests.entry(protocol) { - if *entry.get() > 1 { - *entry.get_mut() -= 1; - } else { - entry.remove(); - } + if let Some(active_requests) = self.active_requests.get_mut(peer_id) + && let Entry::Occupied(mut entry) = active_requests.entry(protocol) + { + if *entry.get() > 1 { + *entry.get_mut() -= 1; + } else { + entry.remove(); } } } @@ -511,13 +510,17 @@ mod tests { } assert!(limiter.active_requests.contains_key(&peer1)); - assert!(limiter - .delayed_requests - .contains_key(&(peer1, Protocol::Ping))); + assert!( + limiter + .delayed_requests + .contains_key(&(peer1, Protocol::Ping)) + ); assert!(limiter.active_requests.contains_key(&peer2)); - assert!(limiter - .delayed_requests - .contains_key(&(peer2, Protocol::Ping))); + assert!( + limiter + .delayed_requests + .contains_key(&(peer2, Protocol::Ping)) + ); // Check that the limiter returns the IDs of pending requests and that the IDs are ordered correctly. let mut failed_requests = limiter.peer_disconnected(peer1); @@ -533,13 +536,17 @@ mod tests { // Check that peer1’s active and delayed requests have been removed. 
assert!(!limiter.active_requests.contains_key(&peer1)); - assert!(!limiter - .delayed_requests - .contains_key(&(peer1, Protocol::Ping))); + assert!( + !limiter + .delayed_requests + .contains_key(&(peer1, Protocol::Ping)) + ); assert!(limiter.active_requests.contains_key(&peer2)); - assert!(limiter - .delayed_requests - .contains_key(&(peer2, Protocol::Ping))); + assert!( + limiter + .delayed_requests + .contains_key(&(peer2, Protocol::Ping)) + ); } } diff --git a/beacon_node/lighthouse_network/src/service/gossip_cache.rs b/beacon_node/lighthouse_network/src/service/gossip_cache.rs index e46c69dc716..120b9e6c245 100644 --- a/beacon_node/lighthouse_network/src/service/gossip_cache.rs +++ b/beacon_node/lighthouse_network/src/service/gossip_cache.rs @@ -1,11 +1,11 @@ -use std::collections::hash_map::Entry; use std::collections::HashMap; +use std::collections::hash_map::Entry; use std::pin::Pin; use std::task::{Context, Poll}; use std::time::Duration; -use crate::types::GossipKind; use crate::GossipTopic; +use crate::types::GossipKind; use tokio_util::time::delay_queue::{DelayQueue, Key}; diff --git a/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs b/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs index 6fffd649f52..873d3f92525 100644 --- a/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs +++ b/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs @@ -1,5 +1,5 @@ -use crate::types::{GossipEncoding, GossipKind, GossipTopic}; use crate::TopicHash; +use crate::types::{GossipEncoding, GossipKind, GossipTopic}; use gossipsub::{IdentTopic as Topic, PeerScoreParams, PeerScoreThresholds, TopicScoreParams}; use std::cmp::max; use std::collections::HashMap; diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 18a453db8b0..54c01ae16a5 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ 
b/beacon_node/lighthouse_network/src/service/mod.rs @@ -1,37 +1,37 @@ use self::gossip_cache::GossipCache; -use crate::config::{gossipsub_config, GossipsubConfigParams, NetworkLoad}; +use crate::EnrExt; +use crate::Eth2Enr; +use crate::config::{GossipsubConfigParams, NetworkLoad, gossipsub_config}; use crate::discovery::{ - subnet_predicate, DiscoveredPeers, Discovery, FIND_NODE_QUERY_CLOSEST_PEERS, + DiscoveredPeers, Discovery, FIND_NODE_QUERY_CLOSEST_PEERS, subnet_predicate, }; use crate::peer_manager::{ - config::Config as PeerManagerCfg, peerdb::score::PeerAction, peerdb::score::ReportSource, - ConnectionDirection, PeerManager, PeerManagerEvent, + ConnectionDirection, PeerManager, PeerManagerEvent, config::Config as PeerManagerCfg, + peerdb::score::PeerAction, peerdb::score::ReportSource, }; use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR, PRIORITY_PEER_EXCESS}; use crate::rpc::methods::MetadataRequest; use crate::rpc::{ - GoodbyeReason, HandlerErr, InboundRequestId, NetworkParams, Protocol, RPCError, RPCMessage, - RPCReceived, RequestType, ResponseTermination, RpcResponse, RpcSuccessResponse, RPC, + GoodbyeReason, HandlerErr, InboundRequestId, NetworkParams, Protocol, RPC, RPCError, + RPCMessage, RPCReceived, RequestType, ResponseTermination, RpcResponse, RpcSuccessResponse, }; use crate::types::{ - all_topics_at_fork, core_topics_to_subscribe, is_fork_non_core_topic, subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, SnappyTransform, Subnet, SubnetDiscovery, + all_topics_at_fork, core_topics_to_subscribe, is_fork_non_core_topic, subnet_from_topic_hash, }; -use crate::EnrExt; -use crate::Eth2Enr; -use crate::{metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash}; +use crate::{Enr, NetworkGlobals, PubsubMessage, TopicHash, metrics}; use api_types::{AppRequestId, Response}; use futures::stream::StreamExt; use gossipsub::{ IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, PublishError, TopicScoreParams, 
}; -use gossipsub_scoring_parameters::{lighthouse_gossip_thresholds, PeerScoreSettings}; +use gossipsub_scoring_parameters::{PeerScoreSettings, lighthouse_gossip_thresholds}; use libp2p::multiaddr::{self, Multiaddr, Protocol as MProtocol}; use libp2p::swarm::behaviour::toggle::Toggle; use libp2p::swarm::{NetworkBehaviour, Swarm, SwarmEvent}; use libp2p::upnp::tokio::Behaviour as Upnp; -use libp2p::{identify, PeerId, SwarmBuilder}; +use libp2p::{PeerId, SwarmBuilder, identify}; use logging::crit; use std::num::{NonZeroU8, NonZeroUsize}; use std::path::PathBuf; @@ -39,11 +39,11 @@ use std::pin::Pin; use std::sync::Arc; use std::time::Duration; use tracing::{debug, error, info, trace, warn}; +use types::{ChainSpec, ForkName}; use types::{ - consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, Slot, SubnetId, + EnrForkId, EthSpec, ForkContext, Slot, SubnetId, consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, }; -use types::{ChainSpec, ForkName}; -use utils::{build_transport, strip_peer_id, Context as ServiceContext}; +use utils::{Context as ServiceContext, build_transport, strip_peer_id}; pub mod api_types; mod gossip_cache; @@ -906,19 +906,17 @@ impl Network { MessageAcceptance::Accept => None, MessageAcceptance::Ignore => Some("ignore"), MessageAcceptance::Reject => Some("reject"), - } { - if let Some(client) = self - .network_globals - .peers - .read() - .peer_info(propagation_source) - .map(|info| info.client().kind.as_ref()) - { - metrics::inc_counter_vec( - &metrics::GOSSIP_UNACCEPTED_MESSAGES_PER_CLIENT, - &[client, result], - ) - } + } && let Some(client) = self + .network_globals + .peers + .read() + .peer_info(propagation_source) + .map(|info| info.client().kind.as_ref()) + { + metrics::inc_counter_vec( + &metrics::GOSSIP_UNACCEPTED_MESSAGES_PER_CLIENT, + &[client, result], + ) } self.gossipsub_mut().report_message_validation_result( @@ -1000,12 +998,11 @@ impl Network { if let Err(response) = self .eth2_rpc_mut() 
.send_response(inbound_request_id, response.into()) + && self.network_globals.peers.read().is_connected(&peer_id) { - if self.network_globals.peers.read().is_connected(&peer_id) { - error!(%peer_id, ?inbound_request_id, %response, - "Request not found in RPC active requests" - ); - } + error!(%peer_id, ?inbound_request_id, %response, + "Request not found in RPC active requests" + ); } } diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index 9a939368743..4e94129fc35 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -5,8 +5,8 @@ use crate::{GossipTopic, NetworkConfig}; use futures::future::Either; use gossipsub; use libp2p::core::{multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed}; -use libp2p::identity::{secp256k1, Keypair}; -use libp2p::{core, noise, yamux, PeerId, Transport}; +use libp2p::identity::{Keypair, secp256k1}; +use libp2p::{PeerId, Transport, core, noise, yamux}; use prometheus_client::registry::Registry; use ssz::Decode; use std::collections::HashSet; diff --git a/beacon_node/lighthouse_network/src/types/mod.rs b/beacon_node/lighthouse_network/src/types/mod.rs index 868cdb6eb9f..0bbbcebaf29 100644 --- a/beacon_node/lighthouse_network/src/types/mod.rs +++ b/beacon_node/lighthouse_network/src/types/mod.rs @@ -15,6 +15,6 @@ pub use globals::NetworkGlobals; pub use pubsub::{PubsubMessage, SnappyTransform}; pub use subnet::{Subnet, SubnetDiscovery}; pub use topics::{ - all_topics_at_fork, core_topics_to_subscribe, is_fork_non_core_topic, subnet_from_topic_hash, - GossipEncoding, GossipKind, GossipTopic, TopicConfig, + GossipEncoding, GossipKind, GossipTopic, TopicConfig, all_topics_at_fork, + core_topics_to_subscribe, is_fork_non_core_topic, subnet_from_topic_hash, }; diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 
601c59a9c84..567c76a02c0 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -1,8 +1,8 @@ //! Handles the encoding and decoding of pubsub messages. -use crate::types::{GossipEncoding, GossipKind, GossipTopic}; use crate::TopicHash; -use snap::raw::{decompress_len, Decoder, Encoder}; +use crate::types::{GossipEncoding, GossipKind, GossipTopic}; +use snap::raw::{Decoder, Encoder, decompress_len}; use ssz::{Decode, Encode}; use std::io::{Error, ErrorKind}; use std::sync::Arc; @@ -191,7 +191,7 @@ impl PubsubMessage { return Err(format!( "Unknown gossipsub fork digest: {:?}", gossip_topic.fork_digest - )) + )); } }; Ok(PubsubMessage::AggregateAndProofAttestation(Box::new( @@ -242,7 +242,7 @@ impl PubsubMessage { return Err(format!( "Unknown gossipsub fork digest: {:?}", gossip_topic.fork_digest - )) + )); } }; Ok(PubsubMessage::BeaconBlock(Arc::new(beacon_block))) @@ -250,17 +250,16 @@ impl PubsubMessage { GossipKind::BlobSidecar(blob_index) => { if let Some(fork_name) = fork_context.get_fork_from_context_bytes(gossip_topic.fork_digest) + && fork_name.deneb_enabled() { - if fork_name.deneb_enabled() { - let blob_sidecar = Arc::new( - BlobSidecar::from_ssz_bytes(data) - .map_err(|e| format!("{:?}", e))?, - ); - return Ok(PubsubMessage::BlobSidecar(Box::new(( - *blob_index, - blob_sidecar, - )))); - } + let blob_sidecar = Arc::new( + BlobSidecar::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ); + return Ok(PubsubMessage::BlobSidecar(Box::new(( + *blob_index, + blob_sidecar, + )))); } Err(format!( @@ -317,7 +316,7 @@ impl PubsubMessage { return Err(format!( "Unknown gossipsub fork digest: {:?}", gossip_topic.fork_digest - )) + )); } }; Ok(PubsubMessage::AttesterSlashing(Box::new(attester_slashing))) @@ -346,30 +345,38 @@ impl PubsubMessage { ))) } GossipKind::LightClientFinalityUpdate => { - let light_client_finality_update = match 
fork_context.get_fork_from_context_bytes(gossip_topic.fork_digest) { + let light_client_finality_update = match fork_context + .get_fork_from_context_bytes(gossip_topic.fork_digest) + { Some(&fork_name) => { - LightClientFinalityUpdate::from_ssz_bytes(data, fork_name) + LightClientFinalityUpdate::from_ssz_bytes(data, fork_name) .map_err(|e| format!("{:?}", e))? - }, - None => return Err(format!( - "light_client_finality_update topic invalid for given fork digest {:?}", - gossip_topic.fork_digest - )), + } + None => { + return Err(format!( + "light_client_finality_update topic invalid for given fork digest {:?}", + gossip_topic.fork_digest + )); + } }; Ok(PubsubMessage::LightClientFinalityUpdate(Box::new( light_client_finality_update, ))) } GossipKind::LightClientOptimisticUpdate => { - let light_client_optimistic_update = match fork_context.get_fork_from_context_bytes(gossip_topic.fork_digest) { + let light_client_optimistic_update = match fork_context + .get_fork_from_context_bytes(gossip_topic.fork_digest) + { Some(&fork_name) => { LightClientOptimisticUpdate::from_ssz_bytes(data, fork_name) - .map_err(|e| format!("{:?}", e))? - }, - None => return Err(format!( - "light_client_optimistic_update topic invalid for given fork digest {:?}", - gossip_topic.fork_digest - )), + .map_err(|e| format!("{:?}", e))? 
+ } + None => { + return Err(format!( + "light_client_optimistic_update topic invalid for given fork digest {:?}", + gossip_topic.fork_digest + )); + } }; Ok(PubsubMessage::LightClientOptimisticUpdate(Box::new( light_client_optimistic_update, @@ -436,10 +443,7 @@ impl std::fmt::Display for PubsubMessage { PubsubMessage::Attestation(data) => write!( f, "SingleAttestation: subnet_id: {}, attestation_slot: {}, committee_index: {:?}, attester_index: {:?}", - *data.0, - data.1.data.slot, - data.1.committee_index, - data.1.attester_index, + *data.0, data.1.data.slot, data.1.committee_index, data.1.attester_index, ), PubsubMessage::VoluntaryExit(_data) => write!(f, "Voluntary Exit"), PubsubMessage::ProposerSlashing(_data) => write!(f, "Proposer Slashing"), diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index caec40fa2fb..b22adfbc487 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -531,8 +531,10 @@ mod tests { let s = get_sampling_subnets(); let topic_config = get_topic_config(&s); for fork in ForkName::list_all() { - assert!(core_topics_to_subscribe::(fork, &topic_config, &spec,) - .contains(&GossipKind::BeaconBlock)); + assert!( + core_topics_to_subscribe::(fork, &topic_config, &spec,) + .contains(&GossipKind::BeaconBlock) + ); } } diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index 7503dc55689..0d1a623fbdd 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -1,13 +1,13 @@ #![cfg(test)] -use lighthouse_network::service::Network as LibP2PService; use lighthouse_network::Enr; use lighthouse_network::EnrExt; use lighthouse_network::Multiaddr; +use lighthouse_network::service::Network as LibP2PService; use lighthouse_network::{NetworkConfig, NetworkEvent}; use std::sync::Arc; use std::sync::Weak; use 
tokio::runtime::Runtime; -use tracing::{debug, error, info_span, Instrument}; +use tracing::{Instrument, debug, error, info_span}; use tracing_subscriber::EnvFilter; use types::{ ChainSpec, EnrForkId, Epoch, EthSpec, FixedBytesExtended, ForkContext, ForkName, Hash256, diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index c3371428197..ad0b4c4462a 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -3,8 +3,8 @@ mod common; use crate::common::spec_with_all_forks_enabled; -use common::{build_tracing_subscriber, Protocol}; -use lighthouse_network::rpc::{methods::*, RequestType}; +use common::{Protocol, build_tracing_subscriber}; +use lighthouse_network::rpc::{RequestType, methods::*}; use lighthouse_network::service::api_types::AppRequestId; use lighthouse_network::{NetworkEvent, ReportSource, Response}; use ssz::Encode; @@ -13,7 +13,7 @@ use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::runtime::Runtime; use tokio::time::sleep; -use tracing::{debug, error, info_span, warn, Instrument}; +use tracing::{Instrument, debug, error, info_span, warn}; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BlobSidecar, ChainSpec, EmptyBlock, Epoch, EthSpec, FixedBytesExtended, ForkName, Hash256, MinimalEthSpec, diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index cdb6ba7a83f..dc251bd2d63 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -55,5 +55,7 @@ gossipsub = { workspace = true } k256 = "0.13.4" kzg = { workspace = true } matches = "0.1.8" -rand_chacha = "0.3.1" +rand_08 = { package = "rand", version = "0.8.5" } +rand_chacha = "0.9.0" +rand_chacha_03 = { package = "rand_chacha", version = "0.3.1" } serde_json = { workspace = true } diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 
24a179fa807..a2b5af8b086 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -1,14 +1,13 @@ use beacon_chain::{ - attestation_verification::Error as AttnError, + AvailabilityProcessingStatus, BlockError, attestation_verification::Error as AttnError, light_client_finality_update_verification::Error as LightClientFinalityUpdateError, light_client_optimistic_update_verification::Error as LightClientOptimisticUpdateError, - sync_committee_verification::Error as SyncCommitteeError, AvailabilityProcessingStatus, - BlockError, + sync_committee_verification::Error as SyncCommitteeError, }; use fnv::FnvHashMap; use lighthouse_network::{ - peer_manager::peerdb::client::ClientKind, types::GossipKind, GossipTopic, Gossipsub, - NetworkGlobals, + GossipTopic, Gossipsub, NetworkGlobals, peer_manager::peerdb::client::ClientKind, + types::GossipKind, }; pub use metrics::*; use std::sync::{Arc, LazyLock}; @@ -117,17 +116,18 @@ pub static BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL: LazyLock> = LazyLock::new(|| { try_create_int_counter( - "beacon_processor_gossip_block_requeued_total", - "Total number of gossip blocks that arrived early and were re-queued for later processing." - ) + "beacon_processor_gossip_block_requeued_total", + "Total number of gossip blocks that arrived early and were re-queued for later processing.", + ) }); -pub static BEACON_PROCESSOR_GOSSIP_BLOCK_EARLY_SECONDS: LazyLock> = - LazyLock::new(|| { +pub static BEACON_PROCESSOR_GOSSIP_BLOCK_EARLY_SECONDS: LazyLock> = LazyLock::new( + || { try_create_histogram( - "beacon_processor_gossip_block_early_seconds", - "Whenever a gossip block is received early this metrics is set to how early that block was." 
- ) - }); + "beacon_processor_gossip_block_early_seconds", + "Whenever a gossip block is received early this metrics is set to how early that block was.", + ) + }, +); pub static BEACON_PROCESSOR_GOSSIP_BLOB_VERIFIED_TOTAL: LazyLock> = LazyLock::new(|| { try_create_int_counter( @@ -259,9 +259,9 @@ pub static BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_IMPORTED_TOTAL: LazyLock> = LazyLock::new(|| { try_create_int_counter( - "beacon_processor_unaggregated_attestation_requeued_total", - "Total number of unaggregated attestations that referenced an unknown block and were re-queued." - ) + "beacon_processor_unaggregated_attestation_requeued_total", + "Total number of unaggregated attestations that referenced an unknown block and were re-queued.", + ) }); // Aggregated attestations. pub static BEACON_PROCESSOR_AGGREGATED_ATTESTATION_VERIFIED_TOTAL: LazyLock> = @@ -281,9 +281,9 @@ pub static BEACON_PROCESSOR_AGGREGATED_ATTESTATION_IMPORTED_TOTAL: LazyLock> = LazyLock::new(|| { try_create_int_counter( - "beacon_processor_aggregated_attestation_requeued_total", - "Total number of aggregated attestations that referenced an unknown block and were re-queued." - ) + "beacon_processor_aggregated_attestation_requeued_total", + "Total number of aggregated attestations that referenced an unknown block and were re-queued.", + ) }); // Sync committee messages. 
pub static BEACON_PROCESSOR_SYNC_MESSAGE_VERIFIED_TOTAL: LazyLock> = @@ -504,9 +504,9 @@ pub static BEACON_BLOCK_DELAY_GOSSIP: LazyLock> = LazyLock::new pub static BEACON_BLOCK_DELAY_GOSSIP_VERIFICATION: LazyLock> = LazyLock::new( || { try_create_int_gauge( - "beacon_block_delay_gossip_verification", - "Keeps track of the time delay from the start of the slot to the point we propagate the block" - ) + "beacon_block_delay_gossip_verification", + "Keeps track of the time delay from the start of the slot to the point we propagate the block", + ) }, ); pub static BEACON_BLOCK_DELAY_FULL_VERIFICATION: LazyLock> = LazyLock::new(|| { @@ -519,9 +519,9 @@ pub static BEACON_BLOCK_DELAY_FULL_VERIFICATION: LazyLock> = La pub static BEACON_BLOCK_DELAY_GOSSIP_ARRIVED_LATE_TOTAL: LazyLock> = LazyLock::new(|| { try_create_int_counter( - "beacon_block_delay_gossip_arrived_late_total", - "Count of times when a gossip block arrived from the network later than the attestation deadline.", - ) + "beacon_block_delay_gossip_arrived_late_total", + "Count of times when a gossip block arrived from the network later than the attestation deadline.", + ) }); /* @@ -541,28 +541,30 @@ pub static BEACON_DATA_COLUMN_GOSSIP_PROPAGATION_VERIFICATION_DELAY_TIME: LazyLo "beacon_data_column_gossip_propagation_verification_delay_time", "Duration between when the data column sidecar is received over gossip and when it is verified for propagation.", // [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5] - decimal_buckets(-3,-1) + decimal_buckets(-3, -1), ) }); pub static BEACON_DATA_COLUMN_GOSSIP_SLOT_START_DELAY_TIME: LazyLock> = LazyLock::new(|| { try_create_histogram_with_buckets( - "beacon_data_column_gossip_slot_start_delay_time", - "Duration between when the data column sidecar is received over gossip and the start of the slot it belongs to.", - // Create a custom bucket list for greater granularity in block delay - Ok(vec![0.1, 0.2, 
0.3,0.4,0.5,0.75,1.0,1.25,1.5,1.75,2.0,2.5,3.0,3.5,4.0,5.0,6.0,7.0,8.0,9.0,10.0,15.0,20.0]) - // NOTE: Previous values, which we may want to switch back to. - // [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50] - //decimal_buckets(-1,2) - ) + "beacon_data_column_gossip_slot_start_delay_time", + "Duration between when the data column sidecar is received over gossip and the start of the slot it belongs to.", + // Create a custom bucket list for greater granularity in block delay + Ok(vec![ + 0.1, 0.2, 0.3, 0.4, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0, 2.5, 3.0, 3.5, 4.0, 5.0, + 6.0, 7.0, 8.0, 9.0, 10.0, 15.0, 20.0, + ]), // NOTE: Previous values, which we may want to switch back to. + // [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50] + //decimal_buckets(-1,2) + ) }); pub static BEACON_BLOB_DELAY_GOSSIP_VERIFICATION: LazyLock> = LazyLock::new( || { try_create_int_gauge( - "beacon_blob_delay_gossip_verification", - "Keeps track of the time delay from the start of the slot to the point we propagate the blob" - ) + "beacon_blob_delay_gossip_verification", + "Keeps track of the time delay from the start of the slot to the point we propagate the blob", + ) }, ); pub static BEACON_BLOB_DELAY_FULL_VERIFICATION: LazyLock> = LazyLock::new(|| { @@ -575,24 +577,25 @@ pub static BEACON_BLOB_DELAY_FULL_VERIFICATION: LazyLock> = Laz pub static BEACON_BLOB_RPC_SLOT_START_DELAY_TIME: LazyLock> = LazyLock::new( || { try_create_histogram_with_buckets( - "beacon_blob_rpc_slot_start_delay_time", - "Duration between when a blob is received over rpc and the start of the slot it belongs to.", - // Create a custom bucket list for greater granularity in block delay - Ok(vec![0.1, 0.2, 0.3,0.4,0.5,0.75,1.0,1.25,1.5,1.75,2.0,2.5,3.0,3.5,4.0,5.0,6.0,7.0,8.0,9.0,10.0,15.0,20.0]) - // NOTE: Previous values, which we may want to switch back to. 
- // [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50] - //decimal_buckets(-1,2) - - ) + "beacon_blob_rpc_slot_start_delay_time", + "Duration between when a blob is received over rpc and the start of the slot it belongs to.", + // Create a custom bucket list for greater granularity in block delay + Ok(vec![ + 0.1, 0.2, 0.3, 0.4, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0, 2.5, 3.0, 3.5, 4.0, 5.0, + 6.0, 7.0, 8.0, 9.0, 10.0, 15.0, 20.0, + ]), // NOTE: Previous values, which we may want to switch back to. + // [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50] + //decimal_buckets(-1,2) + ) }, ); pub static BEACON_BLOB_GOSSIP_ARRIVED_LATE_TOTAL: LazyLock> = LazyLock::new( || { try_create_int_counter( - "beacon_blob_gossip_arrived_late_total", - "Count of times when a gossip blob arrived from the network later than the attestation deadline.", - ) + "beacon_blob_gossip_arrived_late_total", + "Count of times when a gossip blob arrived from the network later than the attestation deadline.", + ) }, ); @@ -604,7 +607,7 @@ pub static BEACON_PROCESSOR_REPROCESSING_QUEUE_SENT_OPTIMISTIC_UPDATES: LazyLock > = LazyLock::new(|| { try_create_int_counter( "beacon_processor_reprocessing_queue_sent_optimistic_updates", - "Number of queued light client optimistic updates where as matching block has been imported." + "Number of queued light client optimistic updates where as matching block has been imported.", ) }); diff --git a/beacon_node/network/src/nat.rs b/beacon_node/network/src/nat.rs index ce9d241d43d..f1c768e67b9 100644 --- a/beacon_node/network/src/nat.rs +++ b/beacon_node/network/src/nat.rs @@ -3,8 +3,8 @@ //! Currently supported strategies: //! 
- UPnP -use anyhow::{bail, Context, Error}; -use igd_next::{aio::tokio as igd, PortMappingProtocol}; +use anyhow::{Context, Error, bail}; +use igd_next::{PortMappingProtocol, aio::tokio as igd}; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::time::Duration; use tokio::time::sleep; diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 5cf5d21c0a8..7ccbab19deb 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -9,6 +9,8 @@ use beacon_chain::block_verification_types::AsBlock; use beacon_chain::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; use beacon_chain::store::Error; use beacon_chain::{ + AvailabilityProcessingStatus, BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, + GossipVerifiedBlock, NotifyExecutionLayer, attestation_verification::{self, Error as AttnError, VerifiedAttestation}, data_availability_checker::AvailabilityCheckErrorCategory, light_client_finality_update_verification::Error as LightClientFinalityUpdateError, @@ -16,8 +18,6 @@ use beacon_chain::{ observed_operations::ObservationOutcome, sync_committee_verification::{self, Error as SyncCommitteeError}, validator_monitor::{get_block_delay_ms, get_slot_delay_ms}, - AvailabilityProcessingStatus, BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, - GossipVerifiedBlock, NotifyExecutionLayer, }; use beacon_processor::{Work, WorkEvent}; use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; @@ -32,23 +32,22 @@ use std::sync::Arc; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::HotColdDBError; use tokio::sync::mpsc::error::TrySendError; -use tracing::{debug, error, info, instrument, trace, warn, Instrument, Span}; +use tracing::{Instrument, Span, debug, 
error, info, instrument, trace, warn}; use types::{ - beacon_block::BlockImportSource, Attestation, AttestationData, AttestationRef, - AttesterSlashing, BlobSidecar, DataColumnSidecar, DataColumnSubnetId, EthSpec, Hash256, - IndexedAttestation, LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, - SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange, - SignedContributionAndProof, SignedVoluntaryExit, SingleAttestation, Slot, SubnetId, - SyncCommitteeMessage, SyncSubnetId, + Attestation, AttestationData, AttestationRef, AttesterSlashing, BlobSidecar, DataColumnSidecar, + DataColumnSubnetId, EthSpec, Hash256, IndexedAttestation, LightClientFinalityUpdate, + LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, + SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, SingleAttestation, + Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, beacon_block::BlockImportSource, }; use beacon_processor::work_reprocessing_queue::QueuedColumnReconstruction; use beacon_processor::{ + DuplicateCache, GossipAggregatePackage, GossipAttestationBatch, work_reprocessing_queue::{ QueuedAggregate, QueuedGossipBlock, QueuedLightClientUpdate, QueuedUnaggregate, ReprocessQueueMessage, }, - DuplicateCache, GossipAggregatePackage, GossipAttestationBatch, }; /// Set to `true` to introduce stricter penalties for peers who send some types of late consensus @@ -1520,7 +1519,7 @@ impl NetworkBeaconProcessor { "Block with unknown parent attempted to be processed" ); } - Err(ref e @ BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => { + Err(e @ BlockError::ExecutionPayloadError(epe)) if !epe.penalize_peer() => { debug!( error = %e, "Failed to verify execution payload" diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index a78fea453ec..ccf002e7e5d 100644 --- 
a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -1,10 +1,10 @@ use crate::sync::manager::BlockProcessType; use crate::{service::NetworkMessage, sync::manager::SyncMessage}; -use beacon_chain::blob_verification::{observe_gossip_blob, GossipBlobError}; +use beacon_chain::blob_verification::{GossipBlobError, observe_gossip_blob}; use beacon_chain::block_verification_types::RpcBlock; -use beacon_chain::data_column_verification::{observe_gossip_data_column, GossipDataColumnError}; +use beacon_chain::data_column_verification::{GossipDataColumnError, observe_gossip_data_column}; use beacon_chain::fetch_blobs::{ - fetch_and_process_engine_blobs, EngineGetBlobsOutput, FetchEngineBlobError, + EngineGetBlobsOutput, FetchEngineBlobError, fetch_and_process_engine_blobs, }; use beacon_chain::{ AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, BlockError, NotifyExecutionLayer, @@ -13,14 +13,14 @@ use beacon_processor::{ BeaconProcessorSend, DuplicateCache, GossipAggregatePackage, GossipAttestationPackage, Work, WorkEvent as BeaconWorkEvent, }; +use lighthouse_network::rpc::InboundRequestId; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, LightClientUpdatesByRangeRequest, }; -use lighthouse_network::rpc::InboundRequestId; use lighthouse_network::{ - rpc::{BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, StatusMessage}, Client, MessageId, NetworkGlobals, PeerId, PubsubMessage, + rpc::{BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, StatusMessage}, }; use rand::prelude::SliceRandom; use std::path::PathBuf; @@ -922,7 +922,7 @@ impl NetworkBeaconProcessor { // Permute the blobs and split them into batches. // The hope is that we won't need to publish some blobs because we will receive them // on gossip from other nodes. 
- blobs.shuffle(&mut rand::thread_rng()); + blobs.shuffle(&mut rand::rng()); let blob_publication_batch_interval = chain.config.blob_publication_batch_interval; let mut publish_count = 0usize; @@ -1004,7 +1004,7 @@ impl NetworkBeaconProcessor { // Permute the columns and split them into batches. // The hope is that we won't need to publish some columns because we will receive them // on gossip from other nodes. - data_columns_to_publish.shuffle(&mut rand::thread_rng()); + data_columns_to_publish.shuffle(&mut rand::rng()); let blob_publication_batch_interval = chain.config.blob_publication_batch_interval; let blob_publication_batches = chain.config.blob_publication_batches; diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index e5541b1a291..117377c9245 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -1,10 +1,10 @@ use crate::metrics; -use crate::network_beacon_processor::{NetworkBeaconProcessor, FUTURE_SLOT_TOLERANCE}; +use crate::network_beacon_processor::{FUTURE_SLOT_TOLERANCE, NetworkBeaconProcessor}; use crate::service::NetworkMessage; use crate::status::ToStatusMessage; use crate::sync::SyncMessage; use beacon_chain::{BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; -use itertools::{process_results, Itertools}; +use itertools::{Itertools, process_results}; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, }; @@ -12,7 +12,7 @@ use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, ReportSource, Response, SyncInfo}; use methods::LightClientUpdatesByRangeRequest; use slot_clock::SlotClock; -use std::collections::{hash_map::Entry, HashMap}; +use std::collections::{HashMap, hash_map::Entry}; use std::sync::Arc; use tokio_stream::StreamExt; use tracing::{debug, error, 
instrument, warn}; @@ -708,7 +708,7 @@ impl NetworkBeaconProcessor { Err(e) => { if matches!( e, - BeaconChainError::ExecutionLayerErrorPayloadReconstruction(_block_hash, ref boxed_error) + BeaconChainError::ExecutionLayerErrorPayloadReconstruction(_block_hash, boxed_error) if matches!(**boxed_error, execution_layer::Error::EngineError(_)) ) { warn!( diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index 7f1453067a2..306a184627e 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -1,27 +1,27 @@ use crate::metrics::{self, register_process_result_metrics}; -use crate::network_beacon_processor::{NetworkBeaconProcessor, FUTURE_SLOT_TOLERANCE}; +use crate::network_beacon_processor::{FUTURE_SLOT_TOLERANCE, NetworkBeaconProcessor}; use crate::sync::BatchProcessResult; use crate::sync::{ - manager::{BlockProcessType, SyncMessage}, ChainId, + manager::{BlockProcessType, SyncMessage}, }; use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; use beacon_chain::data_availability_checker::AvailabilityCheckError; use beacon_chain::data_availability_checker::MaybeAvailableBlock; use beacon_chain::{ - validator_monitor::get_slot_delay_ms, AvailabilityProcessingStatus, BeaconChainTypes, - BlockError, ChainSegmentResult, HistoricalBlockError, NotifyExecutionLayer, + AvailabilityProcessingStatus, BeaconChainTypes, BlockError, ChainSegmentResult, + HistoricalBlockError, NotifyExecutionLayer, validator_monitor::get_slot_delay_ms, }; use beacon_processor::{ - work_reprocessing_queue::{QueuedRpcBlock, ReprocessQueueMessage}, AsyncFn, BlockingFn, DuplicateCache, + work_reprocessing_queue::{QueuedRpcBlock, ReprocessQueueMessage}, }; use beacon_processor::{Work, WorkEvent}; use lighthouse_network::PeerAction; use std::sync::Arc; use std::time::Duration; use store::KzgCommitment; -use 
tracing::{debug, error, info, instrument, warn, Span}; +use tracing::{Span, debug, error, info, instrument, warn}; use types::beacon_block_body::format_kzg_commitments; use types::blob_sidecar::FixedBlobSidecarList; use types::{BlockImportSource, DataColumnSidecarList, Epoch, Hash256}; @@ -277,15 +277,15 @@ impl NetworkBeaconProcessor { "RPC blobs received" ); - if let Ok(current_slot) = self.chain.slot() { - if current_slot == slot { - // Note: this metric is useful to gauge how long it takes to receive blobs requested - // over rpc. Since we always send the request for block components at `slot_clock.single_lookup_delay()` - // we can use that as a baseline to measure against. - let delay = get_slot_delay_ms(seen_timestamp, slot, &self.chain.slot_clock); + if let Ok(current_slot) = self.chain.slot() + && current_slot == slot + { + // Note: this metric is useful to gauge how long it takes to receive blobs requested + // over rpc. Since we always send the request for block components at `slot_clock.single_lookup_delay()` + // we can use that as a baseline to measure against. 
+ let delay = get_slot_delay_ms(seen_timestamp, slot, &self.chain.slot_clock); - metrics::observe_duration(&metrics::BEACON_BLOB_RPC_SLOT_START_DELAY_TIME, delay); - } + metrics::observe_duration(&metrics::BEACON_BLOB_RPC_SLOT_START_DELAY_TIME, delay); } let result = self.chain.process_rpc_blobs(slot, block_root, blobs).await; @@ -347,11 +347,11 @@ impl NetworkBeaconProcessor { return; }; - if let Ok(current_slot) = self.chain.slot() { - if current_slot == slot { - let delay = get_slot_delay_ms(seen_timestamp, slot, &self.chain.slot_clock); - metrics::observe_duration(&metrics::BEACON_BLOB_RPC_SLOT_START_DELAY_TIME, delay); - } + if let Ok(current_slot) = self.chain.slot() + && current_slot == slot + { + let delay = get_slot_delay_ms(seen_timestamp, slot, &self.chain.slot_clock); + metrics::observe_duration(&metrics::BEACON_BLOB_RPC_SLOT_START_DELAY_TIME, delay); } let mut indices = custody_columns.iter().map(|d| d.index).collect::>(); @@ -602,7 +602,7 @@ impl NetworkBeaconProcessor { peer_action: Some(PeerAction::LowToleranceError), message: format!("Failed to check block availability : {:?}", e), }), - ) + ); } }, }; diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 6408fcffd97..557f9a29141 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -6,27 +6,27 @@ use crate::{ ChainSegmentProcessId, DuplicateCache, InvalidBlockStorage, NetworkBeaconProcessor, }, service::NetworkMessage, - sync::{manager::BlockProcessType, SyncMessage}, + sync::{SyncMessage, manager::BlockProcessType}, }; use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::data_column_verification::validate_data_column_sidecar_for_gossip; use beacon_chain::kzg_utils::blobs_to_data_column_sidecars; use beacon_chain::observed_data_sidecars::DoNotObserve; use beacon_chain::test_utils::{ - get_kzg, test_spec, 
AttestationStrategy, BeaconChainHarness, BlockStrategy, - EphemeralHarnessType, + AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, get_kzg, + test_spec, }; use beacon_chain::{BeaconChain, WhenSlotSkipped}; use beacon_processor::{work_reprocessing_queue::*, *}; use gossipsub::MessageAcceptance; use itertools::Itertools; -use lighthouse_network::rpc::methods::{BlobsByRangeRequest, MetaDataV3}; use lighthouse_network::rpc::InboundRequestId; +use lighthouse_network::rpc::methods::{BlobsByRangeRequest, MetaDataV3}; use lighthouse_network::{ + Client, MessageId, NetworkConfig, NetworkGlobals, PeerId, Response, discv5::enr::{self, CombinedKey}, rpc::methods::{MetaData, MetaDataV2}, types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}, - Client, MessageId, NetworkConfig, NetworkGlobals, PeerId, Response, }; use matches::assert_matches; use slot_clock::SlotClock; @@ -654,10 +654,10 @@ impl TestRig { loop { // Break if we've received the requested count of messages - if let Some(target_count) = count { - if events.len() >= target_count { - break; - } + if let Some(target_count) = count + && events.len() >= target_count + { + break; } tokio::select! 
{ diff --git a/beacon_node/network/src/persisted_dht.rs b/beacon_node/network/src/persisted_dht.rs index 938b08a315c..113b3cdd323 100644 --- a/beacon_node/network/src/persisted_dht.rs +++ b/beacon_node/network/src/persisted_dht.rs @@ -70,8 +70,8 @@ impl StoreItem for PersistedDht { mod tests { use super::*; use std::str::FromStr; - use store::config::StoreConfig; use store::MemoryStore; + use store::config::StoreConfig; use types::{ChainSpec, MinimalEthSpec}; #[test] fn test_persisted_dht() { diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index ddc043bf48d..d534c354d90 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -14,11 +14,11 @@ use beacon_processor::{BeaconProcessorSend, DuplicateCache}; use futures::prelude::*; use lighthouse_network::rpc::*; use lighthouse_network::{ - service::api_types::{AppRequestId, SyncRequestId}, MessageId, NetworkGlobals, PeerId, PubsubMessage, Response, + service::api_types::{AppRequestId, SyncRequestId}, }; -use logging::crit; use logging::TimeLatch; +use logging::crit; use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use tokio::sync::mpsc; diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index a82f94ec335..c97206ea873 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -1,31 +1,31 @@ +use crate::NetworkConfig; use crate::metrics; use crate::nat; use crate::network_beacon_processor::InvalidBlockStorage; use crate::persisted_dht::{clear_dht, load_dht, persist_dht}; use crate::router::{Router, RouterMessage}; use crate::subnet_service::{SubnetService, SubnetServiceMessage, Subscription}; -use crate::NetworkConfig; use beacon_chain::{BeaconChain, BeaconChainTypes}; use beacon_processor::BeaconProcessorSend; use futures::channel::mpsc::Sender; use futures::future::OptionFuture; use futures::prelude::*; -use lighthouse_network::rpc::methods::RpcResponse; +use 
lighthouse_network::Enr; use lighthouse_network::rpc::InboundRequestId; use lighthouse_network::rpc::RequestType; +use lighthouse_network::rpc::methods::RpcResponse; use lighthouse_network::service::Network; use lighthouse_network::types::GossipKind; -use lighthouse_network::Enr; -use lighthouse_network::{prometheus_client::registry::Registry, MessageAcceptance}; use lighthouse_network::{ - rpc::{GoodbyeReason, RpcErrorResponse}, Context, PeerAction, PubsubMessage, ReportSource, Response, Subnet, + rpc::{GoodbyeReason, RpcErrorResponse}, }; +use lighthouse_network::{MessageAcceptance, prometheus_client::registry::Registry}; use lighthouse_network::{ - service::api_types::AppRequestId, - types::{core_topics_to_subscribe, GossipEncoding, GossipTopic}, MessageId, NetworkEvent, NetworkGlobals, PeerId, + service::api_types::AppRequestId, + types::{GossipEncoding, GossipTopic, core_topics_to_subscribe}, }; use logging::crit; use std::collections::BTreeSet; @@ -393,13 +393,12 @@ impl NetworkService { let mut result = vec![fork_context.context_bytes(current_epoch)]; - if let Some(next_digest_epoch) = spec.next_digest_epoch(current_epoch) { - if current_slot.saturating_add(Slot::new(SUBSCRIBE_DELAY_SLOTS)) + if let Some(next_digest_epoch) = spec.next_digest_epoch(current_epoch) + && current_slot.saturating_add(Slot::new(SUBSCRIBE_DELAY_SLOTS)) >= next_digest_epoch.start_slot(T::EthSpec::slots_per_epoch()) - { - let next_digest = fork_context.context_bytes(next_digest_epoch); - result.push(next_digest); - } + { + let next_digest = fork_context.context_bytes(next_digest_epoch); + result.push(next_digest); } result diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index a8f68384a02..64815ab2bb4 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -2,8 +2,8 @@ #![cfg(test)] use crate::persisted_dht::load_dht; use crate::{NetworkConfig, NetworkService}; -use 
beacon_chain::test_utils::BeaconChainHarness; use beacon_chain::BeaconChainTypes; +use beacon_chain::test_utils::BeaconChainHarness; use beacon_processor::{BeaconProcessorChannels, BeaconProcessorConfig}; use futures::StreamExt; use lighthouse_network::types::{GossipEncoding, GossipKind}; diff --git a/beacon_node/network/src/status.rs b/beacon_node/network/src/status.rs index 6c2ada447d2..ebf5c1829e5 100644 --- a/beacon_node/network/src/status.rs +++ b/beacon_node/network/src/status.rs @@ -1,7 +1,7 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use types::{EthSpec, FixedBytesExtended, Hash256}; -use lighthouse_network::rpc::{methods::StatusMessageV2, StatusMessage}; +use lighthouse_network::rpc::{StatusMessage, methods::StatusMessageV2}; /// Trait to produce a `StatusMessage` representing the state of the given `beacon_chain`. /// /// NOTE: The purpose of this is simply to obtain a `StatusMessage` from the `BeaconChain` without diff --git a/beacon_node/network/src/subnet_service/mod.rs b/beacon_node/network/src/subnet_service/mod.rs index 2d4fa778695..be491e56d3e 100644 --- a/beacon_node/network/src/subnet_service/mod.rs +++ b/beacon_node/network/src/subnet_service/mod.rs @@ -13,7 +13,7 @@ use tokio::time::Instant; use beacon_chain::{BeaconChain, BeaconChainTypes}; use delay_map::HashSetDelay; use futures::prelude::*; -use lighthouse_network::{discv5::enr::NodeId, NetworkConfig, Subnet, SubnetDiscovery}; +use lighthouse_network::{NetworkConfig, Subnet, SubnetDiscovery, discv5::enr::NodeId}; use slot_clock::SlotClock; use tracing::{debug, error, info, warn}; use types::{ @@ -347,11 +347,11 @@ impl SubnetService { // If the discovery mechanism isn't disabled, attempt to set up a peer discovery for the // required subnets. 
- if !self.discovery_disabled { - if let Err(e) = self.discover_peers_request(subnets_to_discover.into_iter()) { - warn!(error = e, "Discovery lookup request error"); - }; - } + if !self.discovery_disabled + && let Err(e) = self.discover_peers_request(subnets_to_discover.into_iter()) + { + warn!(error = e, "Discovery lookup request error"); + }; } /// Checks if we have subscribed aggregate validators for the subnet. If not, checks the gossip @@ -671,13 +671,13 @@ impl Stream for SubnetService { } // Poll to remove entries on expiration, no need to act on expiration events. - if let Some(tracked_vals) = self.aggregate_validators_on_subnet.as_mut() { - if let Poll::Ready(Some(Err(e))) = tracked_vals.poll_next_unpin(cx) { - error!( - error = e, - "Failed to check for aggregate validator on subnet expirations" - ); - } + if let Some(tracked_vals) = self.aggregate_validators_on_subnet.as_mut() + && let Poll::Ready(Some(Err(e))) = tracked_vals.poll_next_unpin(cx) + { + error!( + error = e, + "Failed to check for aggregate validator on subnet expirations" + ); } Poll::Pending diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 86d1be08ece..03627dde054 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -1,13 +1,13 @@ use super::*; use beacon_chain::{ + BeaconChain, builder::{BeaconChainBuilder, Witness}, test_utils::get_kzg, - BeaconChain, }; -use genesis::{generate_deterministic_keypairs, interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; +use genesis::{DEFAULT_ETH1_BLOCK_HASH, generate_deterministic_keypairs, interop_genesis_state}; use lighthouse_network::NetworkConfig; -use rand::rngs::StdRng; use rand::SeedableRng; +use rand::rngs::StdRng; use slot_clock::{SlotClock, SystemTimeSlotClock}; use std::sync::{Arc, LazyLock}; use std::time::{Duration, SystemTime}; @@ -130,11 +130,10 @@ async fn get_events_until_timeout + Unpin 
tokio::select! { Some(event) = stream.next() => { events.push(event); - if let Some(num) = num_events { - if events.len() == num { + if let Some(num) = num_events + && events.len() == num { break; } - } } _ = sleep.as_mut() => { break; @@ -481,7 +480,7 @@ mod test { // and 1 `DiscoverPeer` request corresponding to the bulk subnet discovery. assert_eq!(discover_peer_count, 1 + 1); // Generates a single discovery for permanent - // subscriptions and 1 for the subscription + // subscriptions and 1 for the subscription assert_eq!(enr_add_count, subnets_per_node); assert_eq!(unexpected_msg_count, 0); } @@ -584,7 +583,7 @@ mod test { println!("{events:?}"); let subscription_slot = current_slot + subscription_slot2 - 1; // one less do to the - // advance subscription time + // advance subscription time let wait_duration = subnet_service .beacon_chain .slot_clock diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index e7a57092dd3..ae9ac2e7705 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -24,8 +24,8 @@ use lighthouse_network::types::{BackFillState, NetworkGlobals}; use lighthouse_network::{PeerAction, PeerId}; use logging::crit; use std::collections::{ - btree_map::{BTreeMap, Entry}, HashSet, + btree_map::{BTreeMap, Entry}, }; use std::sync::Arc; use tracing::{debug, error, info, warn}; @@ -484,7 +484,7 @@ impl BackFillSync { Err(e) => { return self .fail_sync(BackFillError::BatchInvalidState(batch_id, e.0)) - .map(|_| ProcessResult::Successful) + .map(|_| ProcessResult::Successful); } Ok(v) => v, }; @@ -748,7 +748,7 @@ impl BackFillSync { // only for batches awaiting validation can we be sure the last attempt is // right, and thus, that any different attempt is wrong match batch.state() { - BatchState::AwaitingValidation(ref processed_attempt) => { + BatchState::AwaitingValidation(processed_attempt) => { for attempt in 
batch.attempts() { // The validated batch has been re-processed if attempt.hash != processed_attempt.hash { @@ -796,10 +796,10 @@ impl BackFillSync { BatchState::AwaitingProcessing(..) => {} BatchState::Processing(_) => { debug!(batch = %id, %batch, "Advancing chain while processing a batch"); - if let Some(processing_id) = self.current_processing_batch { - if id >= processing_id { - self.current_processing_batch = None; - } + if let Some(processing_id) = self.current_processing_batch + && id >= processing_id + { + self.current_processing_batch = None; } } } @@ -844,7 +844,7 @@ impl BackFillSync { for (id, batch) in self .batches .iter_mut() - .filter(|(&id, _batch)| id > batch_id) + .filter(|&(&id, ref _batch)| id > batch_id) { match batch .validation_failed() @@ -934,7 +934,7 @@ impl BackFillSync { self.fail_sync(BackFillError::BatchDownloadFailed(batch_id))? } Ok(BatchOperationOutcome::Continue) => { - return self.send_batch(network, batch_id) + return self.send_batch(network, batch_id); } } } @@ -1115,7 +1115,7 @@ impl BackFillSync { ) -> bool { if network.chain.spec.is_peer_das_enabled_for_epoch(epoch) { // Require peers on all sampling column subnets before sending batches - let peers_on_all_custody_subnets = network + network .network_globals() .sampling_subnets() .iter() @@ -1127,8 +1127,7 @@ impl BackFillSync { .good_range_sync_custody_subnet_peers(*subnet_id) .count(); peer_count > 0 - }); - peers_on_all_custody_subnets + }) } else { true } @@ -1196,8 +1195,8 @@ mod tests { use beacon_chain::test_utils::BeaconChainHarness; use bls::Hash256; use lighthouse_network::{NetworkConfig, SyncInfo, SyncStatus}; - use rand::prelude::StdRng; - use rand::SeedableRng; + use rand_08::SeedableRng; + use rand_08::prelude::StdRng; use types::MinimalEthSpec; #[test] diff --git a/beacon_node/network/src/sync/block_lookups/common.rs b/beacon_node/network/src/sync/block_lookups/common.rs index 86b6894bac4..c6b05190871 100644 --- 
a/beacon_node/network/src/sync/block_lookups/common.rs +++ b/beacon_node/network/src/sync/block_lookups/common.rs @@ -14,8 +14,8 @@ use std::sync::Arc; use types::blob_sidecar::FixedBlobSidecarList; use types::{DataColumnSidecarList, SignedBeaconBlock}; -use super::single_block_lookup::{ComponentRequests, DownloadResult}; use super::SingleLookupId; +use super::single_block_lookup::{ComponentRequests, DownloadResult}; #[derive(Debug, Copy, Clone)] pub enum ResponseType { diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index cc732a865e7..f8f8d8a9a5b 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -20,15 +20,15 @@ //! or consider a lookup complete. These caches are read from the `SyncNetworkContext` and its state //! returned to this module as `LookupRequestResult` variants. -use self::parent_chain::{compute_parent_chains, NodeChain}; +use self::parent_chain::{NodeChain, compute_parent_chains}; pub use self::single_block_lookup::DownloadResult; use self::single_block_lookup::{LookupRequestError, LookupResult, SingleBlockLookup}; use super::manager::{BlockProcessType, BlockProcessingResult, SLOT_IMPORT_TOLERANCE}; use super::network_context::{PeerGroup, RpcResponseError, SyncNetworkContext}; use crate::metrics; +use crate::sync::SyncMessage; use crate::sync::block_lookups::common::ResponseType; use crate::sync::block_lookups::parent_chain::find_oldest_fork_ancestor; -use crate::sync::SyncMessage; use beacon_chain::block_verification_types::AsBlock; use beacon_chain::data_availability_checker::{ AvailabilityCheckError, AvailabilityCheckErrorCategory, @@ -365,15 +365,14 @@ impl BlockLookups { } // Ensure that awaiting parent exists, otherwise this lookup won't be able to make progress - if let Some(awaiting_parent) = awaiting_parent { - if !self + if let Some(awaiting_parent) = awaiting_parent + && !self .single_block_lookups 
.iter() .any(|(_, lookup)| lookup.is_for_block(awaiting_parent)) - { - warn!(block_root = ?awaiting_parent, "Ignoring child lookup parent lookup not found"); - return false; - } + { + warn!(block_root = ?awaiting_parent, "Ignoring child lookup parent lookup not found"); + return false; } // Lookups contain untrusted data, bound the total count of lookups hold in memory to reduce diff --git a/beacon_node/network/src/sync/block_lookups/parent_chain.rs b/beacon_node/network/src/sync/block_lookups/parent_chain.rs index 009b5e2ff74..551a0261f2c 100644 --- a/beacon_node/network/src/sync/block_lookups/parent_chain.rs +++ b/beacon_node/network/src/sync/block_lookups/parent_chain.rs @@ -117,7 +117,7 @@ pub(crate) fn find_oldest_fork_ancestor( #[cfg(test)] mod tests { - use super::{compute_parent_chains, find_oldest_fork_ancestor, Node}; + use super::{Node, compute_parent_chains, find_oldest_fork_ancestor}; use types::{FixedBytesExtended, Hash256}; fn h(n: u64) -> Hash256 { diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index 8ccbc64a17a..605da3b4bda 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -2,10 +2,10 @@ use beacon_chain::{ block_verification_types::RpcBlock, data_column_verification::CustodyDataColumn, get_block_root, }; use lighthouse_network::{ + PeerAction, PeerId, service::api_types::{ BlobsByRangeRequestId, BlocksByRangeRequestId, DataColumnsByRangeRequestId, }, - PeerAction, PeerId, }; use std::{collections::HashMap, sync::Arc}; use types::{ @@ -150,7 +150,7 @@ impl RangeBlockComponentsRequest { ) -> Result<(), String> { match &mut self.block_data_request { RangeBlockDataRequest::NoData => Err("received blobs but expected no data".to_owned()), - RangeBlockDataRequest::Blobs(ref mut req) => req.finish(req_id, blobs), + RangeBlockDataRequest::Blobs(req) => req.finish(req_id, blobs), 
RangeBlockDataRequest::DataColumns { .. } => { Err("received blobs but expected data columns".to_owned()) } @@ -173,9 +173,7 @@ impl RangeBlockComponentsRequest { RangeBlockDataRequest::Blobs(_) => { Err("received data columns but expected blobs".to_owned()) } - RangeBlockDataRequest::DataColumns { - ref mut requests, .. - } => { + RangeBlockDataRequest::DataColumns { requests, .. } => { let req = requests .get_mut(&req_id) .ok_or(format!("unknown data columns by range req_id {req_id}"))?; @@ -197,7 +195,8 @@ impl RangeBlockComponentsRequest { return None; }; - let resp = match &mut self.block_data_request { + // Increment the attempt once this function returns the response or errors + match &mut self.block_data_request { RangeBlockDataRequest::NoData => { Some(Self::responses_with_blobs(blocks.to_vec(), vec![], spec)) } @@ -264,10 +263,7 @@ impl RangeBlockComponentsRequest { Some(resp) } - }; - - // Increment the attempt once this function returns the response or errors - resp + } } fn responses_with_blobs( @@ -283,11 +279,10 @@ impl RangeBlockComponentsRequest { let max_blobs_per_block = spec.max_blobs_per_block(block.epoch()) as usize; let mut blob_list = Vec::with_capacity(max_blobs_per_block); while { - let pair_next_blob = blob_iter + blob_iter .peek() .map(|sidecar| sidecar.slot() == block.slot()) - .unwrap_or(false); - pair_next_blob + .unwrap_or(false) } { blob_list.push(blob_iter.next().ok_or_else(|| { CouplingError::BlobPeerFailure("Missing next blob".to_string()) @@ -467,18 +462,18 @@ mod tests { use super::RangeBlockComponentsRequest; use crate::sync::network_context::MAX_COLUMN_RETRIES; use beacon_chain::test_utils::{ - generate_rand_block_and_blobs, generate_rand_block_and_data_columns, test_spec, NumBlobs, + NumBlobs, generate_rand_block_and_blobs, generate_rand_block_and_data_columns, test_spec, }; use lighthouse_network::{ + PeerAction, PeerId, service::api_types::{ BlobsByRangeRequestId, BlocksByRangeRequestId, ComponentsByRangeRequestId, 
DataColumnsByRangeRequestId, Id, RangeRequestId, }, - PeerAction, PeerId, }; use rand::SeedableRng; use std::sync::Arc; - use types::{test_utils::XorShiftRng, Epoch, ForkName, MinimalEthSpec as E, SignedBeaconBlock}; + use types::{Epoch, ForkName, MinimalEthSpec as E, SignedBeaconBlock, test_utils::XorShiftRng}; fn components_id() -> ComponentsByRangeRequestId { ComponentsByRangeRequestId { @@ -934,10 +929,9 @@ mod tests { if let Err(super::CouplingError::DataColumnPeerFailure { exceeded_retries, .. }) = &result + && *exceeded_retries { - if *exceeded_retries { - break; - } + break; } } diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index cb25c3c77bf..448e784ab6d 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -38,8 +38,8 @@ use super::block_lookups::BlockLookups; use super::network_context::{ CustodyByRootResult, RangeBlockComponent, RangeRequestId, RpcEvent, SyncNetworkContext, }; -use super::peer_sync_info::{remote_sync_type, PeerSyncType}; -use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH}; +use super::peer_sync_info::{PeerSyncType, remote_sync_type}; +use super::range_sync::{EPOCHS_PER_BATCH, RangeSync, RangeSyncType}; use crate::network_beacon_processor::{ChainSegmentProcessId, NetworkBeaconProcessor}; use crate::service::NetworkMessage; use crate::status::ToStatusMessage; @@ -53,6 +53,7 @@ use beacon_chain::{ AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, BlockError, EngineState, }; use futures::StreamExt; +use lighthouse_network::SyncInfo; use lighthouse_network::rpc::RPCError; use lighthouse_network::service::api_types::{ BlobsByRangeRequestId, BlocksByRangeRequestId, ComponentsByRangeRequestId, CustodyRequester, @@ -60,7 +61,6 @@ use lighthouse_network::service::api_types::{ SingleLookupReqId, SyncRequestId, }; use lighthouse_network::types::{NetworkGlobals, SyncState}; -use lighthouse_network::SyncInfo; use 
lighthouse_network::{PeerAction, PeerId}; use logging::crit; use lru_cache::LRUTimeCache; @@ -248,7 +248,10 @@ pub fn spawn( fork_context: Arc, ) { assert!( - beacon_chain.spec.max_request_blocks(fork_context.current_fork_name()) as u64 >= T::EthSpec::slots_per_epoch() * EPOCHS_PER_BATCH, + beacon_chain + .spec + .max_request_blocks(fork_context.current_fork_name()) as u64 + >= T::EthSpec::slots_per_epoch() * EPOCHS_PER_BATCH, "Max blocks that can be requested in a single batch greater than max allowed blocks in a single request" ); diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 0a1c6fbd3a7..76e5ed3f5d9 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -3,10 +3,10 @@ use self::custody::{ActiveCustodyRequest, Error as CustodyRequestError}; pub use self::requests::{BlocksByRootSingleRequest, DataColumnsByRootSingleBlockRequest}; +use super::SyncMessage; use super::block_sidecar_coupling::RangeBlockComponentsRequest; use super::manager::BlockProcessType; use super::range_sync::ByRangeRequestType; -use super::SyncMessage; use crate::metrics; use crate::network_beacon_processor::NetworkBeaconProcessor; #[cfg(test)] @@ -444,13 +444,11 @@ impl SyncNetworkContext { request: BlocksByRangeRequest, failed_columns: &HashSet, ) -> Result<(), String> { - let Some(requester) = self.components_by_range_requests.keys().find_map(|r| { - if r.id == id { - Some(r.requester) - } else { - None - } - }) else { + let Some(requester) = self + .components_by_range_requests + .keys() + .find_map(|r| if r.id == id { Some(r.requester) } else { None }) + else { return Err("request id not present".to_string()); }; @@ -795,7 +793,7 @@ impl SyncNetworkContext { BlockProcessStatus::ExecutionValidated { .. 
} => { return Ok(LookupRequestResult::NoRequestNeeded( "block execution validated", - )) + )); } } diff --git a/beacon_node/network/src/sync/network_context/custody.rs b/beacon_node/network/src/sync/network_context/custody.rs index f4d010b881e..f668e650b22 100644 --- a/beacon_node/network/src/sync/network_context/custody.rs +++ b/beacon_node/network/src/sync/network_context/custody.rs @@ -1,11 +1,11 @@ use crate::sync::network_context::{ DataColumnsByRootRequestId, DataColumnsByRootSingleBlockRequest, }; -use beacon_chain::validator_monitor::timestamp_now; use beacon_chain::BeaconChainTypes; +use beacon_chain::validator_monitor::timestamp_now; use fnv::FnvHashMap; -use lighthouse_network::service::api_types::{CustodyId, DataColumnsByRootRequester}; use lighthouse_network::PeerId; +use lighthouse_network::service::api_types::{CustodyId, DataColumnsByRootRequester}; use lru_cache::LRUTimeCache; use parking_lot::RwLock; use rand::Rng; @@ -14,7 +14,7 @@ use std::time::{Duration, Instant}; use std::{collections::HashMap, marker::PhantomData, sync::Arc}; use tracing::{debug, warn}; use types::EthSpec; -use types::{data_column_sidecar::ColumnIndex, DataColumnSidecar, Hash256}; +use types::{DataColumnSidecar, Hash256, data_column_sidecar::ColumnIndex}; use super::{LookupRequestResult, PeerGroup, RpcResponseResult, SyncNetworkContext}; @@ -258,7 +258,7 @@ impl ActiveCustodyRequest { active_request_count_by_peer.get(peer).copied().unwrap_or(0) + columns_to_request_by_peer.get(peer).map(|_| 1).unwrap_or(0), // Random factor to break ties, otherwise the PeerID breaks ties - rand::thread_rng().gen::(), + rand::rng().random::(), *peer, ) }) diff --git a/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs b/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs index 547c51198e4..0d176e2d8ce 100644 --- a/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs +++ 
b/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs @@ -1,6 +1,6 @@ use lighthouse_network::rpc::methods::BlobsByRootRequest; use std::sync::Arc; -use types::{blob_sidecar::BlobIdentifier, BlobSidecar, EthSpec, ForkContext, Hash256}; +use types::{BlobSidecar, EthSpec, ForkContext, Hash256, blob_sidecar::BlobIdentifier}; use super::{ActiveRequestItems, LookupVerifyError}; diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index 5b48c302906..1f516139969 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -1,7 +1,7 @@ use beacon_chain::block_verification_types::RpcBlock; +use lighthouse_network::PeerId; use lighthouse_network::rpc::methods::BlocksByRangeRequest; use lighthouse_network::service::api_types::Id; -use lighthouse_network::PeerId; use std::collections::HashSet; use std::fmt; use std::hash::{Hash, Hasher}; @@ -459,17 +459,15 @@ impl Attempt { impl std::fmt::Debug for BatchState { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - BatchState::Processing(Attempt { - ref peer_id, - hash: _, - }) => write!(f, "Processing({})", peer_id), - BatchState::AwaitingValidation(Attempt { - ref peer_id, - hash: _, - }) => write!(f, "AwaitingValidation({})", peer_id), + BatchState::Processing(Attempt { peer_id, hash: _ }) => { + write!(f, "Processing({})", peer_id) + } + BatchState::AwaitingValidation(Attempt { peer_id, hash: _ }) => { + write!(f, "AwaitingValidation({})", peer_id) + } BatchState::AwaitingDownload => f.write_str("AwaitingDownload"), BatchState::Failed => f.write_str("Failed"), - BatchState::AwaitingProcessing(ref peer, ref blocks, _) => { + BatchState::AwaitingProcessing(peer, blocks, _) => { write!(f, "AwaitingProcessing({}, {} blocks)", peer, blocks.len()) } BatchState::Downloading(request_id) => { diff --git a/beacon_node/network/src/sync/range_sync/chain.rs 
b/beacon_node/network/src/sync/range_sync/chain.rs index 90d1bf66211..27f071f0815 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -1,16 +1,16 @@ -use super::batch::{BatchInfo, BatchProcessingResult, BatchState}; use super::RangeSyncType; +use super::batch::{BatchInfo, BatchProcessingResult, BatchState}; use crate::metrics; use crate::network_beacon_processor::ChainSegmentProcessId; use crate::sync::block_sidecar_coupling::CouplingError; use crate::sync::network_context::{RangeRequestId, RpcRequestSendError, RpcResponseError}; -use crate::sync::{network_context::SyncNetworkContext, BatchOperationOutcome, BatchProcessResult}; -use beacon_chain::block_verification_types::RpcBlock; +use crate::sync::{BatchOperationOutcome, BatchProcessResult, network_context::SyncNetworkContext}; use beacon_chain::BeaconChainTypes; +use beacon_chain::block_verification_types::RpcBlock; use lighthouse_network::service::api_types::Id; use lighthouse_network::{PeerAction, PeerId}; use logging::crit; -use std::collections::{btree_map::Entry, BTreeMap, HashSet}; +use std::collections::{BTreeMap, HashSet, btree_map::Entry}; use strum::IntoStaticStr; use tracing::{debug, warn}; use types::{ColumnIndex, Epoch, EthSpec, Hash256, Slot}; @@ -311,43 +311,41 @@ impl SyncingChain { // // First try our optimistic start, if any. If this batch is ready, we process it. If the // batch has not already been completed, check the current chain target. - if let Some(epoch) = self.optimistic_start { - if let Some(batch) = self.batches.get(&epoch) { - let state = batch.state(); - match state { - BatchState::AwaitingProcessing(..) => { - // this batch is ready - debug!(%epoch, "Processing optimistic start"); - return self.process_batch(network, epoch); - } - BatchState::Downloading(..) => { - // The optimistic batch is being downloaded. We wait for this before - // attempting to process other batches. 
- return Ok(KeepChain); - } - BatchState::Poisoned => unreachable!("Poisoned batch"), - BatchState::Processing(_) - | BatchState::AwaitingDownload - | BatchState::Failed => { - // these are all inconsistent states: - // - Processing -> `self.current_processing_batch` is None - // - Failed -> non recoverable batch. For an optimistic batch, it should - // have been removed - // - AwaitingDownload -> A recoverable failed batch should have been - // re-requested. - return Err(RemoveChain::WrongChainState(format!( - "Optimistic batch indicates inconsistent chain state: {:?}", - state - ))); - } - BatchState::AwaitingValidation(_) => { - // If an optimistic start is given to the chain after the corresponding - // batch has been requested and processed we can land here. We drop the - // optimistic candidate since we can't conclude whether the batch included - // blocks or not at this point - debug!(batch = %epoch, "Dropping optimistic candidate"); - self.optimistic_start = None; - } + if let Some(epoch) = self.optimistic_start + && let Some(batch) = self.batches.get(&epoch) + { + let state = batch.state(); + match state { + BatchState::AwaitingProcessing(..) => { + // this batch is ready + debug!(%epoch, "Processing optimistic start"); + return self.process_batch(network, epoch); + } + BatchState::Downloading(..) => { + // The optimistic batch is being downloaded. We wait for this before + // attempting to process other batches. + return Ok(KeepChain); + } + BatchState::Poisoned => unreachable!("Poisoned batch"), + BatchState::Processing(_) | BatchState::AwaitingDownload | BatchState::Failed => { + // these are all inconsistent states: + // - Processing -> `self.current_processing_batch` is None + // - Failed -> non recoverable batch. For an optimistic batch, it should + // have been removed + // - AwaitingDownload -> A recoverable failed batch should have been + // re-requested. 
+ return Err(RemoveChain::WrongChainState(format!( + "Optimistic batch indicates inconsistent chain state: {:?}", + state + ))); + } + BatchState::AwaitingValidation(_) => { + // If an optimistic start is given to the chain after the corresponding + // batch has been requested and processed we can land here. We drop the + // optimistic candidate since we can't conclude whether the batch included + // blocks or not at this point + debug!(batch = %epoch, "Dropping optimistic candidate"); + self.optimistic_start = None; } } } @@ -616,7 +614,7 @@ impl SyncingChain { // only for batches awaiting validation can we be sure the last attempt is // right, and thus, that any different attempt is wrong match batch.state() { - BatchState::AwaitingValidation(ref processed_attempt) => { + BatchState::AwaitingValidation(processed_attempt) => { for attempt in batch.attempts() { // The validated batch has been re-processed if attempt.hash != processed_attempt.hash { @@ -662,10 +660,10 @@ impl SyncingChain { BatchState::AwaitingProcessing(..) => {} BatchState::Processing(_) => { debug!(batch = %id, %batch, "Advancing chain while processing a batch"); - if let Some(processing_id) = self.current_processing_batch { - if id <= processing_id { - self.current_processing_batch = None; - } + if let Some(processing_id) = self.current_processing_batch + && id <= processing_id + { + self.current_processing_batch = None; } } } @@ -680,11 +678,12 @@ impl SyncingChain { // won't have this batch, so we need to request it. 
self.to_be_downloaded += EPOCHS_PER_BATCH; } - if let Some(epoch) = self.optimistic_start { - if epoch <= validating_epoch { - self.optimistic_start = None; - } + if let Some(epoch) = self.optimistic_start + && epoch <= validating_epoch + { + self.optimistic_start = None; } + debug!( previous_start = %old_start, new_start = %self.start_epoch, @@ -962,10 +961,10 @@ impl SyncingChain { return Err(RemoveChain::ChainFailed { blacklist, failing_batch: batch_id, - }) + }); } BatchOperationOutcome::Continue => { - return self.send_batch(network, batch_id) + return self.send_batch(network, batch_id); } } } @@ -1088,7 +1087,7 @@ impl SyncingChain { ) -> bool { if network.chain.spec.is_peer_das_enabled_for_epoch(epoch) { // Require peers on all sampling column subnets before sending batches - let peers_on_all_custody_subnets = network + network .network_globals() .sampling_subnets() .iter() @@ -1102,8 +1101,7 @@ impl SyncingChain { }) .count(); peer_count > 0 - }); - peers_on_all_custody_subnets + }) } else { true } diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index 9f500c61e0b..1d57ee6c3dc 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -9,13 +9,13 @@ use crate::metrics; use crate::sync::network_context::SyncNetworkContext; use beacon_chain::{BeaconChain, BeaconChainTypes}; use fnv::FnvHashMap; -use lighthouse_network::service::api_types::Id; use lighthouse_network::PeerId; use lighthouse_network::SyncInfo; +use lighthouse_network::service::api_types::Id; use logging::crit; use smallvec::SmallVec; -use std::collections::hash_map::Entry; use std::collections::HashMap; +use std::collections::hash_map::Entry; use std::sync::Arc; use tracing::{debug, error}; use types::EthSpec; @@ -93,7 +93,7 @@ impl ChainCollection { if let Some(index) = syncing_head_ids .iter() .enumerate() - .find(|(_, 
&chain_id)| &chain_id == id) + .find(|&(_, &chain_id)| &chain_id == id) .map(|(i, _)| i) { // a syncing head chain was removed diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 9259db2db6f..465edd3697f 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -44,8 +44,8 @@ use super::chain_collection::{ChainCollection, SyncChainStatus}; use super::sync_type::RangeSyncType; use crate::metrics; use crate::status::ToStatusMessage; -use crate::sync::network_context::{RpcResponseError, SyncNetworkContext}; use crate::sync::BatchProcessResult; +use crate::sync::network_context::{RpcResponseError, SyncNetworkContext}; use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::rpc::GoodbyeReason; @@ -336,15 +336,16 @@ where debug!(id = chain.id(), ?sync_type, reason = ?remove_reason, op, "Chain removed"); } - if let RemoveChain::ChainFailed { blacklist, .. } = remove_reason { - if RangeSyncType::Finalized == sync_type && blacklist { - warn!( - id = chain.id(), - "Chain failed! Syncing to its head won't be retried for at least the next {} seconds", - FAILED_CHAINS_EXPIRY_SECONDS - ); - self.failed_chains.insert(chain.target_head_root); - } + if let RemoveChain::ChainFailed { blacklist, .. } = remove_reason + && RangeSyncType::Finalized == sync_type + && blacklist + { + warn!( + id = chain.id(), + "Chain failed! 
Syncing to its head won't be retried for at least the next {} seconds", + FAILED_CHAINS_EXPIRY_SECONDS + ); + self.failed_chains.insert(chain.target_head_root); } metrics::inc_counter_vec_by( diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index 0dcc29ef586..6cfe7a82a77 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -1,12 +1,12 @@ +use crate::NetworkMessage; use crate::network_beacon_processor::NetworkBeaconProcessor; use crate::sync::block_lookups::{ BlockLookupSummary, PARENT_DEPTH_TOLERANCE, SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS, }; use crate::sync::{ - manager::{BlockProcessType, BlockProcessingResult, SyncManager}, SyncMessage, + manager::{BlockProcessType, BlockProcessingResult, SyncManager}, }; -use crate::NetworkMessage; use std::sync::Arc; use std::time::Duration; @@ -15,36 +15,36 @@ use super::*; use crate::sync::block_lookups::common::ResponseType; use beacon_chain::observed_data_sidecars::Observe; use beacon_chain::{ + AvailabilityPendingExecutedBlock, AvailabilityProcessingStatus, BlockError, + PayloadVerificationOutcome, PayloadVerificationStatus, blob_verification::GossipVerifiedBlob, block_verification_types::{AsBlock, BlockImportData}, data_availability_checker::Availability, test_utils::{ - generate_rand_block_and_blobs, generate_rand_block_and_data_columns, test_spec, - BeaconChainHarness, EphemeralHarnessType, NumBlobs, + BeaconChainHarness, EphemeralHarnessType, NumBlobs, generate_rand_block_and_blobs, + generate_rand_block_and_data_columns, test_spec, }, validator_monitor::timestamp_now, - AvailabilityPendingExecutedBlock, AvailabilityProcessingStatus, BlockError, - PayloadVerificationOutcome, PayloadVerificationStatus, }; use beacon_processor::WorkEvent; use lighthouse_network::discovery::CombinedKey; use lighthouse_network::{ + NetworkConfig, NetworkGlobals, PeerId, rpc::{RPCError, RequestType, RpcErrorResponse}, 
service::api_types::{ AppRequestId, DataColumnsByRootRequestId, DataColumnsByRootRequester, Id, SingleLookupReqId, SyncRequestId, }, types::SyncState, - NetworkConfig, NetworkGlobals, PeerId, }; use slot_clock::{SlotClock, TestingSlotClock}; use tokio::sync::mpsc; use tracing::info; use types::{ - data_column_sidecar::ColumnIndex, - test_utils::{SeedableRng, TestRandom, XorShiftRng}, BeaconState, BeaconStateBase, BlobSidecar, DataColumnSidecar, EthSpec, ForkContext, ForkName, Hash256, MinimalEthSpec as E, SignedBeaconBlock, Slot, + data_column_sidecar::ColumnIndex, + test_utils::{SeedableRng, TestRandom, XorShiftRng}, }; const D: Duration = Duration::new(0, 0); @@ -104,6 +104,7 @@ impl TestRig { let spec = chain.spec.clone(); // deterministic seed + let rng_08 = ::from_seed([0u8; 32]); let rng = ChaCha20Rng::from_seed([0u8; 32]); init_tracing(); @@ -114,6 +115,7 @@ impl TestRig { network_rx, network_rx_queue: vec![], sync_rx, + rng_08, rng, network_globals: beacon_processor.network_globals.clone(), sync_manager: SyncManager::new( @@ -348,7 +350,7 @@ impl TestRig { } fn determinstic_key(&mut self) -> CombinedKey { - k256::ecdsa::SigningKey::random(&mut self.rng).into() + k256::ecdsa::SigningKey::random(&mut self.rng_08).into() } pub fn new_connected_peers_for_peerdas(&mut self) { diff --git a/beacon_node/network/src/sync/tests/mod.rs b/beacon_node/network/src/sync/tests/mod.rs index 1cc11e01525..23c14ff63ef 100644 --- a/beacon_node/network/src/sync/tests/mod.rs +++ b/beacon_node/network/src/sync/tests/mod.rs @@ -1,7 +1,7 @@ +use crate::NetworkMessage; +use crate::sync::SyncMessage; use crate::sync::manager::SyncManager; use crate::sync::range_sync::RangeSyncType; -use crate::sync::SyncMessage; -use crate::NetworkMessage; use beacon_chain::builder::Witness; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use beacon_processor::WorkEvent; @@ -65,6 +65,7 @@ struct TestRig { /// Beacon chain harness harness: BeaconChainHarness>, /// `rng` for 
generating test blocks and blobs. + rng_08: rand_chacha_03::ChaCha20Rng, rng: ChaCha20Rng, fork_name: ForkName, spec: Arc, diff --git a/beacon_node/network/src/sync/tests/range.rs b/beacon_node/network/src/sync/tests/range.rs index 7c184d3b390..718ddf1c1d7 100644 --- a/beacon_node/network/src/sync/tests/range.rs +++ b/beacon_node/network/src/sync/tests/range.rs @@ -1,19 +1,19 @@ use super::*; use crate::network_beacon_processor::ChainSegmentProcessId; use crate::status::ToStatusMessage; +use crate::sync::SyncMessage; use crate::sync::manager::SLOT_IMPORT_TOLERANCE; use crate::sync::network_context::RangeRequestId; use crate::sync::range_sync::RangeSyncType; -use crate::sync::SyncMessage; use beacon_chain::data_column_verification::CustodyDataColumn; use beacon_chain::test_utils::{AttestationStrategy, BlockStrategy}; -use beacon_chain::{block_verification_types::RpcBlock, EngineState, NotifyExecutionLayer}; +use beacon_chain::{EngineState, NotifyExecutionLayer, block_verification_types::RpcBlock}; use beacon_processor::WorkType; +use lighthouse_network::rpc::RequestType; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, DataColumnsByRangeRequest, OldBlocksByRangeRequest, OldBlocksByRangeRequestV2, StatusMessageV2, }; -use lighthouse_network::rpc::RequestType; use lighthouse_network::service::api_types::{ AppRequestId, BlobsByRangeRequestId, BlocksByRangeRequestId, DataColumnsByRangeRequestId, SyncRequestId, @@ -207,11 +207,12 @@ impl TestRig { return false; } } - if let Some(expected_peer) = request_filter.peer { - if peer != expected_peer { - return false; - } + if let Some(expected_peer) = request_filter.peer + && peer != expected_peer + { + return false; } + true }; diff --git a/beacon_node/operation_pool/src/attestation.rs b/beacon_node/operation_pool/src/attestation.rs index 78280278e07..f28d8f278a0 100644 --- a/beacon_node/operation_pool/src/attestation.rs +++ b/beacon_node/operation_pool/src/attestation.rs @@ -6,9 +6,9 @@ use 
state_processing::common::{ }; use std::collections::HashMap; use types::{ + Attestation, BeaconState, BitList, ChainSpec, EthSpec, beacon_state::BeaconStateBase, consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}, - Attestation, BeaconState, BitList, ChainSpec, EthSpec, }; pub const PROPOSER_REWARD_DENOMINATOR: u64 = @@ -30,7 +30,7 @@ impl<'a, E: EthSpec> AttMaxCover<'a, E> { total_active_balance: u64, spec: &ChainSpec, ) -> Option { - if let BeaconState::Base(ref base_state) = state { + if let BeaconState::Base(base_state) = state { Self::new_for_base(att, state, base_state, total_active_balance, spec) } else { Self::new_for_altair_or_later(att, state, reward_cache, spec) diff --git a/beacon_node/operation_pool/src/attestation_storage.rs b/beacon_node/operation_pool/src/attestation_storage.rs index 13ef94c18d7..4f1b8b81fe4 100644 --- a/beacon_node/operation_pool/src/attestation_storage.rs +++ b/beacon_node/operation_pool/src/attestation_storage.rs @@ -2,9 +2,10 @@ use crate::AttestationStats; use itertools::Itertools; use std::collections::{BTreeMap, HashMap, HashSet}; use types::{ + AggregateSignature, Attestation, AttestationData, BeaconState, BitList, BitVector, Checkpoint, + Epoch, EthSpec, Hash256, Slot, Unsigned, attestation::{AttestationBase, AttestationElectra}, - superstruct, AggregateSignature, Attestation, AttestationData, BeaconState, BitList, BitVector, - Checkpoint, Epoch, EthSpec, Hash256, Slot, Unsigned, + superstruct, }; #[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] diff --git a/beacon_node/operation_pool/src/bls_to_execution_changes.rs b/beacon_node/operation_pool/src/bls_to_execution_changes.rs index b36299b51a0..cc8809c43e6 100644 --- a/beacon_node/operation_pool/src/bls_to_execution_changes.rs +++ b/beacon_node/operation_pool/src/bls_to_execution_changes.rs @@ -1,5 +1,5 @@ use state_processing::SigVerifiedOp; -use std::collections::{hash_map::Entry, HashMap, HashSet}; +use std::collections::{HashMap, 
HashSet, hash_map::Entry}; use std::sync::Arc; use types::{ AbstractExecPayload, BeaconState, ChainSpec, EthSpec, SignedBeaconBlock, diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 642fc51f69d..dd01f568fa3 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -9,7 +9,7 @@ mod reward_cache; mod sync_aggregate_id; pub use crate::bls_to_execution_changes::ReceivedPreCapella; -pub use attestation::{earliest_attestation_validators, AttMaxCover, PROPOSER_REWARD_DENOMINATOR}; +pub use attestation::{AttMaxCover, PROPOSER_REWARD_DENOMINATOR, earliest_attestation_validators}; pub use attestation_storage::{CompactAttestationRef, SplitAttestation}; pub use max_cover::MaxCover; pub use persistence::{ @@ -25,21 +25,22 @@ use crate::sync_aggregate_id::SyncAggregateId; use attester_slashing::AttesterSlashingMaxCover; use max_cover::maximum_cover; use parking_lot::{RwLock, RwLockWriteGuard}; +use rand::rng; use rand::seq::SliceRandom; -use rand::thread_rng; use state_processing::per_block_processing::errors::AttestationValidationError; use state_processing::per_block_processing::{ - get_slashable_indices_modular, verify_exit, VerifySignatures, + VerifySignatures, get_slashable_indices_modular, verify_exit, }; use state_processing::{SigVerifiedOp, VerifyOperation}; -use std::collections::{hash_map::Entry, HashMap, HashSet}; +use std::collections::{HashMap, HashSet, hash_map::Entry}; use std::marker::PhantomData; use std::ptr; use types::{ - sync_aggregate::Error as SyncAggregateError, typenum::Unsigned, AbstractExecPayload, - Attestation, AttestationData, AttesterSlashing, BeaconState, BeaconStateError, ChainSpec, - Epoch, EthSpec, ProposerSlashing, SignedBeaconBlock, SignedBlsToExecutionChange, - SignedVoluntaryExit, Slot, SyncAggregate, SyncCommitteeContribution, Validator, + AbstractExecPayload, Attestation, AttestationData, AttesterSlashing, BeaconState, + BeaconStateError, ChainSpec, 
Epoch, EthSpec, ProposerSlashing, SignedBeaconBlock, + SignedBlsToExecutionChange, SignedVoluntaryExit, Slot, SyncAggregate, + SyncCommitteeContribution, Validator, sync_aggregate::Error as SyncAggregateError, + typenum::Unsigned, }; type SyncContributions = RwLock>>>; @@ -612,7 +613,7 @@ impl OperationPool { |address_change| address_change.as_inner().clone(), usize::MAX, ); - changes.shuffle(&mut thread_rng()); + changes.shuffle(&mut rng()); changes } @@ -790,11 +791,11 @@ mod release_tests { use super::attestation::earliest_attestation_validators; use super::*; use beacon_chain::test_utils::{ - test_spec, BeaconChainHarness, EphemeralHarnessType, RelativeSyncCommittee, + BeaconChainHarness, EphemeralHarnessType, RelativeSyncCommittee, test_spec, }; use maplit::hashset; use state_processing::epoch_cache::initialize_epoch_cache; - use state_processing::{common::get_attesting_indices_from_state, VerifyOperation}; + use state_processing::{VerifyOperation, common::get_attesting_indices_from_state}; use std::collections::BTreeSet; use std::sync::{Arc, LazyLock}; use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs index 88c8dbbf3c3..4d754534605 100644 --- a/beacon_node/operation_pool/src/persistence.rs +++ b/beacon_node/operation_pool/src/persistence.rs @@ -1,8 +1,8 @@ +use crate::OpPoolError; +use crate::OperationPool; use crate::attestation_storage::AttestationMap; use crate::bls_to_execution_changes::{BlsToExecutionChanges, ReceivedPreCapella}; use crate::sync_aggregate_id::SyncAggregateId; -use crate::OpPoolError; -use crate::OperationPool; use derivative::Derivative; use parking_lot::RwLock; use ssz::{Decode, Encode}; diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index e57b8cf4549..924ff079d6f 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -1,7 +1,7 @@ use std::time::Duration; -use clap::{builder::ArgPredicate, 
crate_version, Arg, ArgAction, ArgGroup, Command}; -use clap_utils::{get_color_style, FLAG_HEADER}; +use clap::{Arg, ArgAction, ArgGroup, Command, builder::ArgPredicate, crate_version}; +use clap_utils::{FLAG_HEADER, get_color_style}; use strum::VariantNames; #[allow(clippy::large_stack_frames)] diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 3ed53b10715..0a7af26505a 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -1,12 +1,12 @@ -use account_utils::{read_input_from_user, STDIN_INPUTS_FLAG}; +use account_utils::{STDIN_INPUTS_FLAG, read_input_from_user}; +use beacon_chain::TrustedSetup; use beacon_chain::chain_config::{ - DisallowedReOrgOffsets, ReOrgThreshold, DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR, - DEFAULT_RE_ORG_HEAD_THRESHOLD, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, - DEFAULT_RE_ORG_PARENT_THRESHOLD, INVALID_HOLESKY_BLOCK_ROOT, + DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR, DEFAULT_RE_ORG_HEAD_THRESHOLD, + DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_PARENT_THRESHOLD, + DisallowedReOrgOffsets, INVALID_HOLESKY_BLOCK_ROOT, ReOrgThreshold, }; use beacon_chain::graffiti_calculator::GraffitiOrigin; -use beacon_chain::TrustedSetup; -use clap::{parser::ValueSource, ArgMatches, Id}; +use clap::{ArgMatches, Id, parser::ValueSource}; use clap_utils::flags::DISABLE_MALLOC_TUNING_FLAG; use clap_utils::{parse_flag, parse_optional, parse_required}; use client::{ClientConfig, ClientGenesis}; @@ -15,7 +15,7 @@ use environment::RuntimeContext; use execution_layer::DEFAULT_JWT_FILE; use http_api::TlsConfig; use lighthouse_network::ListenAddress; -use lighthouse_network::{multiaddr::Protocol, Enr, Multiaddr, NetworkConfig, PeerIdSerialized}; +use lighthouse_network::{Enr, Multiaddr, NetworkConfig, PeerIdSerialized, multiaddr::Protocol}; use sensitive_url::SensitiveUrl; use std::collections::HashSet; use std::fmt::Debug; @@ -494,14 +494,15 @@ pub fn get_config( ); // Only append network config bootnodes if 
discovery is not disabled - if !client_config.network.disable_discovery { - if let Some(boot_nodes) = ð2_network_config.boot_enr { - client_config - .network - .boot_nodes_enr - .extend_from_slice(boot_nodes) - } + if !client_config.network.disable_discovery + && let Some(boot_nodes) = ð2_network_config.boot_enr + { + client_config + .network + .boot_nodes_enr + .extend_from_slice(boot_nodes) } + client_config.chain.checkpoint_sync_url_timeout = clap_utils::parse_required::(cli_args, "checkpoint-sync-url-timeout")?; @@ -928,18 +929,18 @@ pub fn parse_listening_addresses(cli_args: &ArgMatches) -> Result match &maybe_ipv4 { Some(first_ipv4_addr) => { return Err(format!( - "When setting the --listen-address option twice, use an IPv4 address and an IPv6 address. \ + "When setting the --listen-address option twice, use an IPv4 address and an IPv6 address. \ Got two IPv4 addresses {first_ipv4_addr} and {v4_addr}" - )); + )); } None => maybe_ipv4 = Some(v4_addr), }, IpAddr::V6(v6_addr) => match &maybe_ipv6 { Some(first_ipv6_addr) => { return Err(format!( - "When setting the --listen-address option twice, use an IPv4 address and an IPv6 address. \ + "When setting the --listen-address option twice, use an IPv4 address and an IPv6 address. \ Got two IPv6 addresses {first_ipv6_addr} and {v6_addr}" - )); + )); } None => maybe_ipv6 = Some(v6_addr), }, @@ -1012,7 +1013,9 @@ pub fn parse_listening_addresses(cli_args: &ArgMatches) -> Result { // A single ipv6 address was provided. Set the ports if cli_args.value_source("port6") == Some(ValueSource::CommandLine) { - warn!("When listening only over IPv6, use the --port flag. The value of --port6 will be ignored."); + warn!( + "When listening only over IPv6, use the --port flag. The value of --port6 will be ignored." 
+ ); } // If we are only listening on ipv6 and the user has specified --port6, lets just use @@ -1026,11 +1029,15 @@ pub fn parse_listening_addresses(cli_args: &ArgMatches) -> Result, _>>()?; if config.trusted_peers.len() >= config.target_peers { - warn!( target_peers = config.target_peers, trusted_peers = config.trusted_peers.len(),"More trusted peers than the target peer limit. This will prevent efficient peer selection criteria."); + warn!( + target_peers = config.target_peers, + trusted_peers = config.trusted_peers.len(), + "More trusted peers than the target peer limit. This will prevent efficient peer selection criteria." + ); } } @@ -1378,7 +1389,7 @@ pub fn set_network_config( let addr_str = format!("{addr}:{port}"); match addr_str.to_socket_addrs() { Err(_e) => { - return Err(format!("Failed to parse or resolve address {addr}.")) + return Err(format!("Failed to parse or resolve address {addr}.")); } Ok(resolved_addresses) => { for socket_addr in resolved_addresses { diff --git a/beacon_node/store/benches/hdiff.rs b/beacon_node/store/benches/hdiff.rs index 2577f03f664..1e295c18a18 100644 --- a/beacon_node/store/benches/hdiff.rs +++ b/beacon_node/store/benches/hdiff.rs @@ -1,10 +1,10 @@ use bls::PublicKeyBytes; -use criterion::{criterion_group, criterion_main, Criterion}; +use criterion::{Criterion, criterion_group, criterion_main}; use rand::Rng; use ssz::Decode; use store::{ - hdiff::{HDiff, HDiffBuffer}, StoreConfig, + hdiff::{HDiff, HDiffBuffer}, }; use types::{BeaconState, Epoch, Eth1Data, EthSpec, MainnetEthSpec as E, Validator}; @@ -12,7 +12,7 @@ pub fn all_benches(c: &mut Criterion) { let spec = E::default_spec(); let genesis_time = 0; let eth1_data = Eth1Data::default(); - let mut rng = rand::thread_rng(); + let mut rng = rand::rng(); let validator_mutations = 1000; let validator_additions = 100; @@ -27,11 +27,11 @@ pub fn all_benches(c: &mut Criterion) { // Change all balances for i in 0..n { let balance = 
target_state.balances_mut().get_mut(i).unwrap(); - *balance += rng.gen_range(1..=1_000_000); + *balance += rng.random_range(1..=1_000_000); } // And some validator records for _ in 0..validator_mutations { - let index = rng.gen_range(1..n); + let index = rng.random_range(1..n); // TODO: Only change a few things, and not the pubkey *target_state.validators_mut().get_mut(index).unwrap() = rand_validator(&mut rng); } @@ -80,7 +80,7 @@ fn bench_against_states( fn rand_validator(mut rng: impl Rng) -> Validator { let mut pubkey = [0u8; 48]; rng.fill_bytes(&mut pubkey); - let withdrawal_credentials: [u8; 32] = rng.gen(); + let withdrawal_credentials: [u8; 32] = rng.random(); Validator { pubkey: PublicKeyBytes::from_ssz_bytes(&pubkey).unwrap(), @@ -97,7 +97,7 @@ fn rand_validator(mut rng: impl Rng) -> Validator { fn append_validator(state: &mut BeaconState, mut rng: impl Rng) { state .balances_mut() - .push(32_000_000_000 + rng.gen_range(1..=1_000_000_000)) + .push(32_000_000_000 + rng.random_range(1..=1_000_000_000)) .unwrap(); if let Ok(inactivity_scores) = state.inactivity_scores_mut() { inactivity_scores.push(0).unwrap(); diff --git a/beacon_node/store/src/chunked_iter.rs b/beacon_node/store/src/chunked_iter.rs index f2821286ec9..72e5d9c7af0 100644 --- a/beacon_node/store/src/chunked_iter.rs +++ b/beacon_node/store/src/chunked_iter.rs @@ -1,4 +1,4 @@ -use crate::chunked_vector::{chunk_key, Chunk, Field}; +use crate::chunked_vector::{Chunk, Field, chunk_key}; use crate::{HotColdDB, ItemStore}; use tracing::error; use types::{ChainSpec, EthSpec, Slot}; diff --git a/beacon_node/store/src/chunked_vector.rs b/beacon_node/store/src/chunked_vector.rs index 90e8c173100..ee043c14f4e 100644 --- a/beacon_node/store/src/chunked_vector.rs +++ b/beacon_node/store/src/chunked_vector.rs @@ -165,17 +165,17 @@ pub trait Field: Copy { if vindex >= start_vindex && vindex < end_vindex { let vector_value = Self::get_value(state, vindex as u64, spec)?; - if let Some(existing_value) = 
existing_chunk.values.get(i) { - if *existing_value != vector_value && *existing_value != Self::Value::default() - { - return Err(ChunkError::Inconsistent { - field: Self::column(), - chunk_index, - existing_value: format!("{:?}", existing_value), - new_value: format!("{:?}", vector_value), - } - .into()); + if let Some(existing_value) = existing_chunk.values.get(i) + && *existing_value != vector_value + && *existing_value != Self::Value::default() + { + return Err(ChunkError::Inconsistent { + field: Self::column(), + chunk_index, + existing_value: format!("{:?}", existing_value), + new_value: format!("{:?}", vector_value), } + .into()); } new_chunk.values[i] = vector_value; diff --git a/beacon_node/store/src/config.rs b/beacon_node/store/src/config.rs index c16573df5e4..e3e33de4f97 100644 --- a/beacon_node/store/src/config.rs +++ b/beacon_node/store/src/config.rs @@ -7,8 +7,8 @@ use ssz_derive::{Decode, Encode}; use std::io::Write; use std::num::NonZeroUsize; use strum::{Display, EnumString, EnumVariantNames}; -use types::non_zero_usize::new_non_zero_usize; use types::EthSpec; +use types::non_zero_usize::new_non_zero_usize; use zstd::Encoder; #[cfg(all(feature = "redb", not(feature = "leveldb")))] diff --git a/beacon_node/store/src/database/interface.rs b/beacon_node/store/src/database/interface.rs index e405c6227d3..5646f1179c8 100644 --- a/beacon_node/store/src/database/interface.rs +++ b/beacon_node/store/src/database/interface.rs @@ -2,8 +2,8 @@ use crate::database::leveldb_impl; #[cfg(feature = "redb")] use crate::database::redb_impl; -use crate::{config::DatabaseBackend, KeyValueStoreOp, StoreConfig}; -use crate::{metrics, ColumnIter, ColumnKeyIter, DBColumn, Error, ItemStore, Key, KeyValueStore}; +use crate::{ColumnIter, ColumnKeyIter, DBColumn, Error, ItemStore, Key, KeyValueStore, metrics}; +use crate::{KeyValueStoreOp, StoreConfig, config::DatabaseBackend}; use std::collections::HashSet; use std::path::Path; use types::EthSpec; diff --git 
a/beacon_node/store/src/database/leveldb_impl.rs b/beacon_node/store/src/database/leveldb_impl.rs index 54d71750898..385f35a33d7 100644 --- a/beacon_node/store/src/database/leveldb_impl.rs +++ b/beacon_node/store/src/database/leveldb_impl.rs @@ -1,14 +1,14 @@ -use crate::hot_cold_store::{BytesKey, HotColdDBError}; use crate::Key; +use crate::hot_cold_store::{BytesKey, HotColdDBError}; use crate::{ - get_key_for_col, metrics, ColumnIter, ColumnKeyIter, DBColumn, Error, KeyValueStoreOp, + ColumnIter, ColumnKeyIter, DBColumn, Error, KeyValueStoreOp, get_key_for_col, metrics, }; use leveldb::{ compaction::Compaction, database::{ + Database, batch::{Batch, Writebatch}, kv::KV, - Database, }, iterator::{Iterable, LevelDBIterator}, options::{Options, ReadOptions}, diff --git a/beacon_node/store/src/database/redb_impl.rs b/beacon_node/store/src/database/redb_impl.rs index 82c4b20aaf2..4077326ecaf 100644 --- a/beacon_node/store/src/database/redb_impl.rs +++ b/beacon_node/store/src/database/redb_impl.rs @@ -1,4 +1,4 @@ -use crate::{metrics, ColumnIter, ColumnKeyIter, Key}; +use crate::{ColumnIter, ColumnKeyIter, Key, metrics}; use crate::{DBColumn, Error, KeyValueStoreOp}; use parking_lot::RwLock; use redb::TableDefinition; diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index eb1fb647187..51b4bfef830 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -1,12 +1,12 @@ use crate::chunked_vector::ChunkError; use crate::config::StoreConfigError; use crate::hot_cold_store::{HotColdDBError, StateSummaryIteratorError}; -use crate::{hdiff, DBColumn}; +use crate::{DBColumn, hdiff}; #[cfg(feature = "leveldb")] use leveldb::error::Error as LevelDBError; use ssz::DecodeError; use state_processing::BlockReplayError; -use types::{milhouse, BeaconStateError, EpochCacheError, Hash256, InconsistentFork, Slot}; +use types::{BeaconStateError, EpochCacheError, Hash256, InconsistentFork, Slot, milhouse}; pub type Result = 
std::result::Result; diff --git a/beacon_node/store/src/hdiff.rs b/beacon_node/store/src/hdiff.rs index 5731ebcbe0e..de62d42c592 100644 --- a/beacon_node/store/src/hdiff.rs +++ b/beacon_node/store/src/hdiff.rs @@ -1,5 +1,5 @@ //! Hierarchical diff implementation. -use crate::{metrics, DBColumn, StoreConfig, StoreItem}; +use crate::{DBColumn, StoreConfig, StoreItem, metrics}; use bls::PublicKeyBytes; use itertools::Itertools; use serde::{Deserialize, Serialize}; @@ -809,7 +809,7 @@ impl StorageStrategy { #[cfg(test)] mod tests { use super::*; - use rand::{rngs::SmallRng, thread_rng, Rng, SeedableRng}; + use rand::{Rng, SeedableRng, rng, rngs::SmallRng}; #[test] fn default_storage_strategy() { @@ -914,7 +914,7 @@ mod tests { fn compressed_validators_diff() { assert_eq!(::ssz_fixed_len(), 129); - let mut rng = thread_rng(); + let mut rng = rng(); let config = &StoreConfig::default(); let xs = (0..10) .map(|_| rand_validator(&mut rng)) @@ -932,7 +932,7 @@ mod tests { fn rand_validator(mut rng: impl Rng) -> Validator { let mut pubkey = [0u8; 48]; rng.fill_bytes(&mut pubkey); - let withdrawal_credentials: [u8; 32] = rng.gen(); + let withdrawal_credentials: [u8; 32] = rng.random(); Validator { pubkey: PublicKeyBytes::from_ssz_bytes(&pubkey).unwrap(), diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 355f95f5576..8116596aa09 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -6,19 +6,19 @@ use crate::historic_state_cache::HistoricStateCache; use crate::iter::{BlockRootsIterator, ParentRootBlockIterator, RootsIterator}; use crate::memory_store::MemoryStore; use crate::metadata::{ - AnchorInfo, BlobInfo, CompactionTimestamp, DataColumnCustodyInfo, DataColumnInfo, - SchemaVersion, ANCHOR_INFO_KEY, ANCHOR_UNINITIALIZED, BLOB_INFO_KEY, COMPACTION_TIMESTAMP_KEY, - CONFIG_KEY, CURRENT_SCHEMA_VERSION, DATA_COLUMN_CUSTODY_INFO_KEY, DATA_COLUMN_INFO_KEY, - SCHEMA_VERSION_KEY, 
SPLIT_KEY, STATE_UPPER_LIMIT_NO_RETAIN, + ANCHOR_INFO_KEY, ANCHOR_UNINITIALIZED, AnchorInfo, BLOB_INFO_KEY, BlobInfo, + COMPACTION_TIMESTAMP_KEY, CONFIG_KEY, CURRENT_SCHEMA_VERSION, CompactionTimestamp, + DATA_COLUMN_CUSTODY_INFO_KEY, DATA_COLUMN_INFO_KEY, DataColumnCustodyInfo, DataColumnInfo, + SCHEMA_VERSION_KEY, SPLIT_KEY, STATE_UPPER_LIMIT_NO_RETAIN, SchemaVersion, }; use crate::state_cache::{PutStateOutcome, StateCache}; use crate::{ - get_data_column_key, + BlobSidecarListFromRoot, DBColumn, DatabaseBlock, Error, ItemStore, KeyValueStoreOp, StoreItem, + StoreOp, get_data_column_key, metrics::{self, COLD_METRIC, HOT_METRIC}, - parse_data_column_key, BlobSidecarListFromRoot, DBColumn, DatabaseBlock, Error, ItemStore, - KeyValueStoreOp, StoreItem, StoreOp, + parse_data_column_key, }; -use itertools::{process_results, Itertools}; +use itertools::{Itertools, process_results}; use lru::LruCache; use parking_lot::{Mutex, RwLock}; use safe_arith::SafeArith; @@ -26,10 +26,10 @@ use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use state_processing::{ - block_replayer::PreSlotHook, AllCaches, BlockProcessingError, BlockReplayer, - SlotProcessingError, + AllCaches, BlockProcessingError, BlockReplayer, SlotProcessingError, + block_replayer::PreSlotHook, }; -use std::cmp::{min, Ordering}; +use std::cmp::{Ordering, min}; use std::collections::{HashMap, HashSet}; use std::io::{Read, Write}; use std::marker::PhantomData; @@ -3641,15 +3641,15 @@ pub fn get_ancestor_state_root<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStor .ok_or(StateSummaryIteratorError::MissingSummary(state_root))?; // Protect against infinite loops if the state summaries are not strictly descending - if let Some(previous_slot) = previous_slot { - if state_summary.slot >= previous_slot { - drop(split); - return Err(StateSummaryIteratorError::CircularSummaries { - state_root, - state_slot: state_summary.slot, - previous_slot, - }); - } + if let 
Some(previous_slot) = previous_slot + && state_summary.slot >= previous_slot + { + drop(split); + return Err(StateSummaryIteratorError::CircularSummaries { + state_root, + state_slot: state_summary.slot, + previous_slot, + }); } previous_slot = Some(state_summary.slot); diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index 47c5a1d9d82..88d509731c8 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -3,8 +3,8 @@ use crate::{Error, HotColdDB, ItemStore}; use std::borrow::Cow; use std::marker::PhantomData; use types::{ - typenum::Unsigned, BeaconState, BeaconStateError, BlindedPayload, EthSpec, Hash256, - SignedBeaconBlock, Slot, + BeaconState, BeaconStateError, BlindedPayload, EthSpec, Hash256, SignedBeaconBlock, Slot, + typenum::Unsigned, }; /// Implemented for types that have ancestors (e.g., blocks, states) that may be iterated over. diff --git a/beacon_node/store/src/memory_store.rs b/beacon_node/store/src/memory_store.rs index a87d4f7f3f9..6baef61c9d8 100644 --- a/beacon_node/store/src/memory_store.rs +++ b/beacon_node/store/src/memory_store.rs @@ -1,6 +1,6 @@ use crate::{ - errors::Error as DBError, get_key_for_col, hot_cold_store::BytesKey, ColumnIter, ColumnKeyIter, - DBColumn, Error, ItemStore, Key, KeyValueStore, KeyValueStoreOp, + ColumnIter, ColumnKeyIter, DBColumn, Error, ItemStore, Key, KeyValueStore, KeyValueStoreOp, + errors::Error as DBError, get_key_for_col, hot_cold_store::BytesKey, }; use parking_lot::RwLock; use std::collections::{BTreeMap, HashSet}; diff --git a/beacon_node/store/src/metrics.rs b/beacon_node/store/src/metrics.rs index e04e6628652..93c9840586e 100644 --- a/beacon_node/store/src/metrics.rs +++ b/beacon_node/store/src/metrics.rs @@ -205,7 +205,7 @@ pub static BEACON_HDIFF_BUFFER_APPLY_RESIZES: LazyLock> = Lazy try_create_histogram_with_buckets( "store_hdiff_buffer_apply_resizes", "Number of times during diff application that the output buffer had to be resized before decoding 
succeeded", - Ok(vec![0.0, 1.0, 2.0, 3.0, 4.0, 5.0]) + Ok(vec![0.0, 1.0, 2.0, 3.0, 4.0, 5.0]), ) }); // This metric is not split hot/cold because both databases use the same hierarchy config anyway diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index fdd1880f559..3ad6f1b94c6 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -1,6 +1,6 @@ use crate::chunked_vector::{ - load_variable_list_from_db, load_vector_from_db, BlockRootsChunked, HistoricalRoots, - HistoricalSummaries, RandaoMixes, StateRootsChunked, + BlockRootsChunked, HistoricalRoots, HistoricalSummaries, RandaoMixes, StateRootsChunked, + load_variable_list_from_db, load_vector_from_db, }; use crate::{DBColumn, Error, KeyValueStore, KeyValueStoreOp}; use ssz::{Decode, DecodeError, Encode}; @@ -232,13 +232,12 @@ impl PartialBeaconState { spec: &ChainSpec, ) -> Result<(), Error> { let slot = self.slot(); - if let Ok(historical_summaries) = self.historical_summaries_mut() { - if historical_summaries.is_none() { - *historical_summaries = - Some(load_variable_list_from_db::( - store, slot, spec, - )?); - } + if let Ok(historical_summaries) = self.historical_summaries_mut() + && historical_summaries.is_none() + { + *historical_summaries = Some(load_variable_list_from_db::( + store, slot, spec, + )?); } Ok(()) } diff --git a/beacon_node/store/src/reconstruct.rs b/beacon_node/store/src/reconstruct.rs index 4bd8f12ead7..7aca692ef9b 100644 --- a/beacon_node/store/src/reconstruct.rs +++ b/beacon_node/store/src/reconstruct.rs @@ -2,10 +2,10 @@ use crate::hot_cold_store::{HotColdDB, HotColdDBError}; use crate::metrics; use crate::{Error, ItemStore}; -use itertools::{process_results, Itertools}; +use itertools::{Itertools, process_results}; use state_processing::{ - per_block_processing, per_slot_processing, BlockSignatureStrategy, ConsensusContext, - VerifyBlockRoot, + BlockSignatureStrategy, 
ConsensusContext, VerifyBlockRoot, per_block_processing, + per_slot_processing, }; use std::sync::Arc; use tracing::{debug, info}; diff --git a/beacon_node/store/src/state_cache.rs b/beacon_node/store/src/state_cache.rs index 352760e808e..05930c7b71e 100644 --- a/beacon_node/store/src/state_cache.rs +++ b/beacon_node/store/src/state_cache.rs @@ -1,7 +1,7 @@ use crate::hdiff::HDiffBuffer; use crate::{ - metrics::{self, HOT_METRIC}, Error, + metrics::{self, HOT_METRIC}, }; use lru::LruCache; use std::collections::{BTreeMap, HashMap, HashSet}; @@ -189,11 +189,12 @@ impl StateCache { // Do not attempt to rebase states prior to the finalized state. This method might be called // with states on the hdiff grid prior to finalization, as part of the reconstruction of // some later unfinalized state. - if let Some(finalized_state) = &self.finalized_state { - if state.slot() >= finalized_state.state.slot() { - state.rebase_on(&finalized_state.state, spec)?; - } + if let Some(finalized_state) = &self.finalized_state + && state.slot() >= finalized_state.state.slot() + { + state.rebase_on(&finalized_state.state, spec)?; } + Ok(()) } @@ -259,10 +260,10 @@ impl StateCache { } pub fn get_by_state_root(&mut self, state_root: Hash256) -> Option> { - if let Some(ref finalized_state) = self.finalized_state { - if state_root == finalized_state.state_root { - return Some(finalized_state.state.clone()); - } + if let Some(ref finalized_state) = self.finalized_state + && state_root == finalized_state.state_root + { + return Some(finalized_state.state.clone()); } self.states.get(&state_root).map(|(_, state)| state.clone()) } @@ -270,10 +271,10 @@ impl StateCache { pub fn put_hdiff_buffer(&mut self, state_root: Hash256, slot: Slot, buffer: &HDiffBuffer) { // Only accept HDiffBuffers prior to finalization. Later states should be stored as proper // states, not HDiffBuffers. 
- if let Some(finalized_state) = &self.finalized_state { - if slot >= finalized_state.state.slot() { - return; - } + if let Some(finalized_state) = &self.finalized_state + && slot >= finalized_state.state.slot() + { + return; } self.hdiff_buffers.put(state_root, slot, buffer.clone()); } diff --git a/beacon_node/tests/test.rs b/beacon_node/tests/test.rs index ab78b65ae93..fa1b902a0f6 100644 --- a/beacon_node/tests/test.rs +++ b/beacon_node/tests/test.rs @@ -2,9 +2,10 @@ use beacon_chain::StateSkipConfig; use node_test_rig::{ + LocalBeaconNode, environment::{Environment, EnvironmentBuilder}, eth2::types::StateId, - testing_client_config, LocalBeaconNode, + testing_client_config, }; use types::{EthSpec, MinimalEthSpec, Slot}; diff --git a/boot_node/src/cli.rs b/boot_node/src/cli.rs index 0f274885d17..301363afe8b 100644 --- a/boot_node/src/cli.rs +++ b/boot_node/src/cli.rs @@ -1,7 +1,7 @@ //! Simple logic for spawning a Lighthouse BootNode. use clap::{Arg, ArgAction, Command}; -use clap_utils::{get_color_style, FLAG_HEADER}; +use clap_utils::{FLAG_HEADER, get_color_style}; // TODO: Add DOS prevention CLI params pub fn cli_app() -> Command { diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs index c43a8b397b1..1cb4e343813 100644 --- a/boot_node/src/config.rs +++ b/boot_node/src/config.rs @@ -2,10 +2,11 @@ use beacon_node::{get_data_dir, set_network_config}; use bytes::Bytes; use clap::ArgMatches; use eth2_network_config::Eth2NetworkConfig; -use lighthouse_network::discv5::{self, enr::CombinedKey, Enr}; +use lighthouse_network::discv5::{self, Enr, enr::CombinedKey}; use lighthouse_network::{ + CombinedKeyExt, NetworkConfig, discovery::{load_enr_from_disk, use_or_load_enr}, - load_private_key, CombinedKeyExt, NetworkConfig, + load_private_key, }; use serde::{Deserialize, Serialize}; use ssz::Encode; @@ -56,26 +57,30 @@ impl BootNodeConfig { set_network_config(&mut network_config, matches, &data_dir)?; // Set the Enr Discovery ports to the listening ports if 
not present. - if let Some(listening_addr_v4) = network_config.listen_addrs().v4() { - if network_config.enr_udp4_port.is_none() { - network_config.enr_udp4_port = - Some(network_config.enr_udp4_port.unwrap_or( - listening_addr_v4.disc_port.try_into().map_err(|_| { - "boot node enr-udp-port not set and listening port is zero" - })?, - )) - } + if let Some(listening_addr_v4) = network_config.listen_addrs().v4() + && network_config.enr_udp4_port.is_none() + { + network_config.enr_udp4_port = Some( + network_config.enr_udp4_port.unwrap_or( + listening_addr_v4 + .disc_port + .try_into() + .map_err(|_| "boot node enr-udp-port not set and listening port is zero")?, + ), + ) }; - if let Some(listening_addr_v6) = network_config.listen_addrs().v6() { - if network_config.enr_udp6_port.is_none() { - network_config.enr_udp6_port = - Some(network_config.enr_udp6_port.unwrap_or( - listening_addr_v6.disc_port.try_into().map_err(|_| { - "boot node enr-udp-port not set and listening port is zero" - })?, - )) - } + if let Some(listening_addr_v6) = network_config.listen_addrs().v6() + && network_config.enr_udp6_port.is_none() + { + network_config.enr_udp6_port = Some( + network_config.enr_udp6_port.unwrap_or( + listening_addr_v6 + .disc_port + .try_into() + .map_err(|_| "boot node enr-udp-port not set and listening port is zero")?, + ), + ) }; // By default this is enabled. If it is not set, revert to false. 
diff --git a/boot_node/src/server.rs b/boot_node/src/server.rs index d96ac0c726f..5bd4ef10a40 100644 --- a/boot_node/src/server.rs +++ b/boot_node/src/server.rs @@ -5,8 +5,8 @@ use crate::config::BootNodeConfigSerialization; use clap::ArgMatches; use eth2_network_config::Eth2NetworkConfig; use lighthouse_network::{ - discv5::{self, enr::NodeId, Discv5}, EnrExt, Eth2Enr, + discv5::{self, Discv5, enr::NodeId}, }; use tracing::{info, warn}; use types::EthSpec; @@ -77,10 +77,10 @@ pub async fn run( node_id = ?enr.node_id(), "Adding bootnode" ); - if enr != local_enr { - if let Err(e) = discv5.add_enr(enr) { - warn!(error = ?e, "Failed adding ENR"); - } + if enr != local_enr + && let Err(e) = discv5.add_enr(enr) + { + warn!(error = ?e, "Failed adding ENR"); } } diff --git a/common/account_utils/src/lib.rs b/common/account_utils/src/lib.rs index 0f576efb3ab..806c9338d51 100644 --- a/common/account_utils/src/lib.rs +++ b/common/account_utils/src/lib.rs @@ -3,11 +3,11 @@ use eth2_keystore::Keystore; use eth2_wallet::{ - bip39::{Language, Mnemonic, MnemonicType}, Wallet, + bip39::{Language, Mnemonic, MnemonicType}, }; -use filesystem::{create_with_600_perms, Error as FsError}; -use rand::{distributions::Alphanumeric, Rng}; +use filesystem::{Error as FsError, create_with_600_perms}; +use rand::{Rng, distr::Alphanumeric}; use std::fs::{self, File}; use std::io; use std::io::prelude::*; @@ -115,7 +115,7 @@ pub fn random_password_string() -> Zeroizing { /// Common implementation for `random_password` and `random_password_string`. 
fn random_password_raw_string() -> String { - rand::thread_rng() + rand::rng() .sample_iter(&Alphanumeric) .take(DEFAULT_PASSWORD_LEN) .map(char::from) diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index 5f32645c928..596d50de420 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -8,11 +8,11 @@ use eth2_keystore::Keystore; use regex::Regex; use serde::{Deserialize, Serialize}; use std::collections::HashSet; -use std::fs::{self, create_dir_all, File}; +use std::fs::{self, File, create_dir_all}; use std::io; use std::path::{Path, PathBuf}; use tracing::error; -use types::{graffiti::GraffitiString, Address, PublicKey}; +use types::{Address, PublicKey, graffiti::GraffitiString}; use validator_dir::VOTING_KEYSTORE_FILE; use zeroize::Zeroizing; diff --git a/common/clap_utils/src/lib.rs b/common/clap_utils/src/lib.rs index a4b5f4dc1c4..bc904c78e35 100644 --- a/common/clap_utils/src/lib.rs +++ b/common/clap_utils/src/lib.rs @@ -1,8 +1,8 @@ //! A helper library for parsing values from `clap::ArgMatches`. 
-use clap::builder::styling::*; use clap::ArgMatches; -use eth2_network_config::{Eth2NetworkConfig, DEFAULT_HARDCODED_NETWORK}; +use clap::builder::styling::*; +use eth2_network_config::{DEFAULT_HARDCODED_NETWORK, Eth2NetworkConfig}; use ssz::Decode; use std::path::PathBuf; use std::str::FromStr; diff --git a/common/compare_fields_derive/src/lib.rs b/common/compare_fields_derive/src/lib.rs index 1a89ccf4fdf..35299707214 100644 --- a/common/compare_fields_derive/src/lib.rs +++ b/common/compare_fields_derive/src/lib.rs @@ -1,6 +1,6 @@ use proc_macro::TokenStream; use quote::quote; -use syn::{parse_macro_input, DeriveInput}; +use syn::{DeriveInput, parse_macro_input}; fn is_iter(field: &syn::Field) -> bool { field.attrs.iter().any(|attr| { diff --git a/common/deposit_contract/src/lib.rs b/common/deposit_contract/src/lib.rs index 5b54a05396a..7d58240f11b 100644 --- a/common/deposit_contract/src/lib.rs +++ b/common/deposit_contract/src/lib.rs @@ -86,8 +86,8 @@ pub fn decode_eth1_tx_data( mod tests { use super::*; use types::{ - test_utils::generate_deterministic_keypair, ChainSpec, EthSpec, Keypair, MinimalEthSpec, - Signature, + ChainSpec, EthSpec, Keypair, MinimalEthSpec, Signature, + test_utils::generate_deterministic_keypair, }; type E = MinimalEthSpec; diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index a129f9c4fa5..3323db53dcf 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -24,13 +24,13 @@ use libp2p_identity::PeerId; use pretty_reqwest_error::PrettyReqwestError; pub use reqwest; use reqwest::{ - header::{HeaderMap, HeaderValue}, Body, IntoUrl, RequestBuilder, Response, + header::{HeaderMap, HeaderValue}, }; pub use reqwest::{StatusCode, Url}; use reqwest_eventsource::{Event, EventSource}; pub use sensitive_url::{SensitiveError, SensitiveUrl}; -use serde::{de::DeserializeOwned, Serialize}; +use serde::{Serialize, de::DeserializeOwned}; use ssz::Encode; use std::fmt; use std::future::Future; @@ -2755,7 +2755,7 @@ impl 
BeaconNodeHttpClient { pub async fn get_events( &self, topic: &[EventTopic], - ) -> Result, Error>>, Error> { + ) -> Result, Error>> + use, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 24fb110a358..4349b487966 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -6,9 +6,9 @@ mod block_rewards; pub mod sync_state; use crate::{ + BeaconNodeHttpClient, DepositData, Error, Hash256, Slot, lighthouse::sync_state::SyncState, types::{AdminPeer, Epoch, GenericResponse, ValidatorId}, - BeaconNodeHttpClient, DepositData, Error, Hash256, Slot, }; use proto_array::core::ProtoArray; use serde::{Deserialize, Serialize}; diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index 1d1abcac791..60289605531 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -1,11 +1,11 @@ use super::types::*; use crate::Error; use reqwest::{ - header::{HeaderMap, HeaderValue}, IntoUrl, + header::{HeaderMap, HeaderValue}, }; use sensitive_url::SensitiveUrl; -use serde::{de::DeserializeOwned, Serialize}; +use serde::{Serialize, de::DeserializeOwned}; use std::fmt::{self, Display}; use std::fs; use std::path::Path; diff --git a/common/eth2/src/mixin.rs b/common/eth2/src/mixin.rs index a33cf8a40c2..c26f4f15b6f 100644 --- a/common/eth2/src/mixin.rs +++ b/common/eth2/src/mixin.rs @@ -1,5 +1,5 @@ -use crate::{types::Accept, Error, CONSENSUS_VERSION_HEADER}; -use reqwest::{header::ACCEPT, RequestBuilder, Response, StatusCode}; +use crate::{CONSENSUS_VERSION_HEADER, Error, types::Accept}; +use reqwest::{RequestBuilder, Response, StatusCode, header::ACCEPT}; use std::str::FromStr; use types::ForkName; diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 07b5cb50166..54bea22a621 100644 --- 
a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -2,11 +2,11 @@ //! required for the HTTP API. use crate::{ - Error as ServerError, CONSENSUS_BLOCK_VALUE_HEADER, CONSENSUS_VERSION_HEADER, - EXECUTION_PAYLOAD_BLINDED_HEADER, EXECUTION_PAYLOAD_VALUE_HEADER, + CONSENSUS_BLOCK_VALUE_HEADER, CONSENSUS_VERSION_HEADER, EXECUTION_PAYLOAD_BLINDED_HEADER, + EXECUTION_PAYLOAD_VALUE_HEADER, Error as ServerError, }; use enr::{CombinedKey, Enr}; -use mediatype::{names, MediaType, MediaTypeList}; +use mediatype::{MediaType, MediaTypeList, names}; use multiaddr::Multiaddr; use reqwest::header::HeaderMap; use serde::{Deserialize, Deserializer, Serialize}; @@ -1115,7 +1115,7 @@ impl<'de> ContextDeserialize<'de, ForkName> for SsePayloadAttributes { return Err(serde::de::Error::custom(format!( "SsePayloadAttributes failed to deserialize: unsupported fork '{}'", context - ))) + ))); } ForkName::Bellatrix => { Self::V1(Deserialize::deserialize(deserializer).map_err(convert_err)?) @@ -1565,7 +1565,7 @@ pub struct BroadcastValidationQuery { pub mod serde_status_code { use crate::StatusCode; - use serde::{de::Error, Deserialize, Serialize}; + use serde::{Deserialize, Serialize, de::Error}; pub fn serialize(status_code: &StatusCode, ser: S) -> Result where diff --git a/common/eth2_network_config/build.rs b/common/eth2_network_config/build.rs index 3165930f4a8..c1f5df45599 100644 --- a/common/eth2_network_config/build.rs +++ b/common/eth2_network_config/build.rs @@ -1,6 +1,6 @@ //! Extracts zipped genesis states on first run. 
use eth2_config::{ - Eth2NetArchiveAndDirectory, GenesisStateSource, ETH2_NET_DIRS, GENESIS_FILE_NAME, + ETH2_NET_DIRS, Eth2NetArchiveAndDirectory, GENESIS_FILE_NAME, GenesisStateSource, }; use std::fs::File; use std::io; diff --git a/common/eth2_network_config/src/lib.rs b/common/eth2_network_config/src/lib.rs index ac488ed2a3f..12de21239a0 100644 --- a/common/eth2_network_config/src/lib.rs +++ b/common/eth2_network_config/src/lib.rs @@ -13,13 +13,13 @@ use bytes::Bytes; use discv5::enr::{CombinedKey, Enr}; -use eth2_config::{instantiate_hardcoded_nets, HardcodedNet}; +use eth2_config::{HardcodedNet, instantiate_hardcoded_nets}; use kzg::trusted_setup::get_trusted_setup; use pretty_reqwest_error::PrettyReqwestError; use reqwest::{Client, Error}; use sensitive_url::SensitiveUrl; use sha2::{Digest, Sha256}; -use std::fs::{create_dir_all, File}; +use std::fs::{File, create_dir_all}; use std::io::{Read, Write}; use std::path::PathBuf; use std::str::FromStr; diff --git a/common/eth2_wallet_manager/src/filesystem.rs b/common/eth2_wallet_manager/src/filesystem.rs index 131b218c7c8..26e725a8ae0 100644 --- a/common/eth2_wallet_manager/src/filesystem.rs +++ b/common/eth2_wallet_manager/src/filesystem.rs @@ -2,7 +2,7 @@ use eth2_wallet::Error as WalletError; use eth2_wallet::{Uuid, Wallet}; -use std::fs::{copy as copy_file, remove_file, File}; +use std::fs::{File, copy as copy_file, remove_file}; use std::io; use std::path::{Path, PathBuf}; diff --git a/common/eth2_wallet_manager/src/locked_wallet.rs b/common/eth2_wallet_manager/src/locked_wallet.rs index 2af863a4bfd..308fe3de908 100644 --- a/common/eth2_wallet_manager/src/locked_wallet.rs +++ b/common/eth2_wallet_manager/src/locked_wallet.rs @@ -1,6 +1,6 @@ use crate::{ - filesystem::{read, update}, Error, + filesystem::{read, update}, }; use eth2_wallet::{Uuid, ValidatorKeystores, Wallet}; use lockfile::Lockfile; diff --git a/common/eth2_wallet_manager/src/wallet_manager.rs 
b/common/eth2_wallet_manager/src/wallet_manager.rs index c988ca4135e..dea94435ce1 100644 --- a/common/eth2_wallet_manager/src/wallet_manager.rs +++ b/common/eth2_wallet_manager/src/wallet_manager.rs @@ -1,12 +1,12 @@ use crate::{ - filesystem::{create, Error as FilesystemError}, LockedWallet, + filesystem::{Error as FilesystemError, create}, }; -use eth2_wallet::{bip39::Mnemonic, Error as WalletError, Uuid, Wallet, WalletBuilder}; +use eth2_wallet::{Error as WalletError, Uuid, Wallet, WalletBuilder, bip39::Mnemonic}; use lockfile::LockfileError; use std::collections::HashMap; use std::ffi::OsString; -use std::fs::{create_dir_all, read_dir, File}; +use std::fs::{File, create_dir_all, read_dir}; use std::io; use std::path::{Path, PathBuf}; diff --git a/common/filesystem/src/lib.rs b/common/filesystem/src/lib.rs index d73b7a355b1..9b96e545bda 100644 --- a/common/filesystem/src/lib.rs +++ b/common/filesystem/src/lib.rs @@ -95,7 +95,7 @@ pub fn restrict_file_permissions>(path: P) -> Result<(), Error> { #[cfg(windows)] { use winapi::um::winnt::PSID; - use windows_acl::acl::{AceType, ACL}; + use windows_acl::acl::{ACL, AceType}; use windows_acl::helper::sid_to_string; let path_str = path diff --git a/common/logging/src/lib.rs b/common/logging/src/lib.rs index 5c4de1fd61c..6722381dbac 100644 --- a/common/logging/src/lib.rs +++ b/common/logging/src/lib.rs @@ -1,4 +1,4 @@ -use metrics::{try_create_int_counter, IntCounter, Result as MetricsResult}; +use metrics::{IntCounter, Result as MetricsResult, try_create_int_counter}; use std::sync::LazyLock; use std::time::{Duration, Instant}; use tracing_subscriber::EnvFilter; @@ -14,7 +14,7 @@ mod utils; pub use sse_logging_components::SSELoggingComponents; pub use tracing_libp2p_discv5_logging_layer::{ - create_libp2p_discv5_tracing_layer, Libp2pDiscv5TracingLayer, + Libp2pDiscv5TracingLayer, create_libp2p_discv5_tracing_layer, }; pub use tracing_logging_layer::LoggingLayer; pub use tracing_metrics_layer::MetricsLayer; diff --git 
a/common/logging/src/sse_logging_components.rs b/common/logging/src/sse_logging_components.rs index d526f2b040d..66567704f80 100644 --- a/common/logging/src/sse_logging_components.rs +++ b/common/logging/src/sse_logging_components.rs @@ -1,8 +1,8 @@ //! This module provides an implementation of `tracing_subscriber::layer::Layer` that optionally writes to a channel if //! there are subscribers to a HTTP SSE stream. -use serde_json::json; use serde_json::Value; +use serde_json::json; use std::sync::Arc; use tokio::sync::broadcast::Sender; use tracing::field::{Field, Visit}; @@ -45,13 +45,12 @@ impl Layer for SSELoggingComponents { .get("fields") .and_then(|fields| fields.get("error_type")) .and_then(|val| val.as_str()) + && error_type.eq_ignore_ascii_case("crit") { - if error_type.eq_ignore_ascii_case("crit") { - log_entry["level"] = json!("CRIT"); + log_entry["level"] = json!("CRIT"); - if let Some(Value::Object(ref mut map)) = log_entry.get_mut("fields") { - map.remove("error_type"); - } + if let Some(Value::Object(map)) = log_entry.get_mut("fields") { + map.remove("error_type"); } } @@ -73,9 +72,11 @@ impl TracingEventVisitor { let mut log_entry = serde_json::Map::new(); log_entry.insert( "time".to_string(), - json!(chrono::Local::now() - .format("%b %d %H:%M:%S%.3f") - .to_string()), + json!( + chrono::Local::now() + .format("%b %d %H:%M:%S%.3f") + .to_string() + ), ); log_entry.insert("level".to_string(), json!(metadata.level().to_string())); log_entry.insert("target".to_string(), json!(metadata.target())); diff --git a/common/logging/src/tracing_libp2p_discv5_logging_layer.rs b/common/logging/src/tracing_libp2p_discv5_logging_layer.rs index ef472ddc527..1c34209e497 100644 --- a/common/logging/src/tracing_libp2p_discv5_logging_layer.rs +++ b/common/logging/src/tracing_libp2p_discv5_logging_layer.rs @@ -4,7 +4,7 @@ use std::io::Write; use std::path::PathBuf; use tracing::Subscriber; use tracing_appender::non_blocking::{NonBlocking, WorkerGuard}; -use 
tracing_subscriber::{layer::Context, Layer}; +use tracing_subscriber::{Layer, layer::Context}; pub struct Libp2pDiscv5TracingLayer { pub libp2p_non_blocking_writer: NonBlocking, @@ -65,11 +65,11 @@ pub fn create_libp2p_discv5_tracing_layer( // Ensure that `tracing_log_path` only contains directories. for p in tracing_log_path.clone().iter() { tracing_log_path = tracing_log_path.join(p); - if let Ok(metadata) = tracing_log_path.metadata() { - if !metadata.is_dir() { - tracing_log_path.pop(); - break; - } + if let Ok(metadata) = tracing_log_path.metadata() + && !metadata.is_dir() + { + tracing_log_path.pop(); + break; } } diff --git a/common/logging/src/tracing_logging_layer.rs b/common/logging/src/tracing_logging_layer.rs index 43feb3c86d4..27841cb7d86 100644 --- a/common/logging/src/tracing_logging_layer.rs +++ b/common/logging/src/tracing_logging_layer.rs @@ -3,13 +3,13 @@ use crate::utils::is_ascii_control; use chrono::prelude::*; use serde_json::{Map, Value}; use std::io::Write; +use tracing::Subscriber; use tracing::field::Field; use tracing::span::Id; -use tracing::Subscriber; use tracing_appender::non_blocking::{NonBlocking, WorkerGuard}; +use tracing_subscriber::Layer; use tracing_subscriber::layer::Context; use tracing_subscriber::registry::LookupSpan; -use tracing_subscriber::Layer; const FIXED_MESSAGE_WIDTH: usize = 44; const ALIGNED_LEVEL_WIDTH: usize = 5; @@ -405,7 +405,7 @@ fn parse_field(val: &str) -> Value { #[cfg(test)] mod tests { - use crate::tracing_logging_layer::{build_log_text, FieldVisitor}; + use crate::tracing_logging_layer::{FieldVisitor, build_log_text}; use std::io::Write; struct Buffer { diff --git a/common/logging/src/utils.rs b/common/logging/src/utils.rs index 784cd5ca705..64bacf90863 100644 --- a/common/logging/src/utils.rs +++ b/common/logging/src/utils.rs @@ -5,8 +5,8 @@ use workspace_members::workspace_crates; const WORKSPACE_CRATES: &[&str] = workspace_crates!(); /// Constructs a filter which only permits logging from crates 
which are members of the workspace. -pub fn build_workspace_filter( -) -> Result bool + Clone>, String> { +pub fn build_workspace_filter() +-> Result bool + Clone>, String> { let workspace_crates: HashSet<&str> = WORKSPACE_CRATES.iter().copied().collect(); Ok(tracing_subscriber::filter::FilterFn::new(move |metadata| { diff --git a/common/malloc_utils/src/glibc.rs b/common/malloc_utils/src/glibc.rs index d50117c09e2..87529580ef6 100644 --- a/common/malloc_utils/src/glibc.rs +++ b/common/malloc_utils/src/glibc.rs @@ -173,11 +173,7 @@ fn mallinfo() -> libc::mallinfo2 { } fn into_result(result: c_int) -> Result<(), c_int> { - if result == 1 { - Ok(()) - } else { - Err(result) - } + if result == 1 { Ok(()) } else { Err(result) } } #[cfg(test)] diff --git a/common/malloc_utils/src/jemalloc.rs b/common/malloc_utils/src/jemalloc.rs index 2e90c0ddf33..6ee3e74da4d 100644 --- a/common/malloc_utils/src/jemalloc.rs +++ b/common/malloc_utils/src/jemalloc.rs @@ -8,10 +8,10 @@ //! A) `JEMALLOC_SYS_WITH_MALLOC_CONF` at compile-time. //! B) `_RJEM_MALLOC_CONF` at runtime. 
use metrics::{ - set_gauge, set_gauge_vec, try_create_int_gauge, try_create_int_gauge_vec, IntGauge, IntGaugeVec, + IntGauge, IntGaugeVec, set_gauge, set_gauge_vec, try_create_int_gauge, try_create_int_gauge_vec, }; use std::sync::LazyLock; -use tikv_jemalloc_ctl::{arenas, epoch, raw, stats, Access, AsName, Error}; +use tikv_jemalloc_ctl::{Access, AsName, Error, arenas, epoch, raw, stats}; #[global_allocator] static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; @@ -114,8 +114,10 @@ pub fn scrape_jemalloc_metrics_fallible() -> Result<(), Error> { } unsafe fn set_stats_gauge(metric: &metrics::Result, arena: u32, stat: &str) { - if let Ok(val) = raw::read::(stat.as_bytes()) { - set_gauge_vec(metric, &[&format!("arena_{arena}")], val as i64); + unsafe { + if let Ok(val) = raw::read::(stat.as_bytes()) { + set_gauge_vec(metric, &[&format!("arena_{arena}")], val as i64); + } } } diff --git a/common/metrics/src/lib.rs b/common/metrics/src/lib.rs index 22513af8bc5..de64fbb2c90 100644 --- a/common/metrics/src/lib.rs +++ b/common/metrics/src/lib.rs @@ -55,10 +55,9 @@ use std::time::Duration; use prometheus::core::{Atomic, GenericGauge, GenericGaugeVec}; pub use prometheus::{ - exponential_buckets, linear_buckets, + DEFAULT_BUCKETS, Encoder, Gauge, GaugeVec, Histogram, HistogramTimer, HistogramVec, IntCounter, + IntCounterVec, IntGauge, IntGaugeVec, Result, TextEncoder, exponential_buckets, linear_buckets, proto::{Metric, MetricFamily, MetricType}, - Encoder, Gauge, GaugeVec, Histogram, HistogramTimer, HistogramVec, IntCounter, IntCounterVec, - IntGauge, IntGaugeVec, Result, TextEncoder, DEFAULT_BUCKETS, }; /// Collect all the metrics for reporting. 
@@ -423,10 +422,10 @@ pub trait TryExt { impl TryExt for std::result::Result { fn discard_timer_on_break(self, timer_opt: &mut Option) -> Self { - if self.is_err() { - if let Some(timer) = timer_opt.take() { - timer.stop_and_discard(); - } + if self.is_err() + && let Some(timer) = timer_opt.take() + { + timer.stop_and_discard(); } self } @@ -434,10 +433,10 @@ impl TryExt for std::result::Result { impl TryExt for Option { fn discard_timer_on_break(self, timer_opt: &mut Option) -> Self { - if self.is_none() { - if let Some(timer) = timer_opt.take() { - timer.stop_and_discard(); - } + if self.is_none() + && let Some(timer) = timer_opt.take() + { + timer.stop_and_discard(); } self } diff --git a/common/monitoring_api/src/lib.rs b/common/monitoring_api/src/lib.rs index 966a1a30542..465618c9a82 100644 --- a/common/monitoring_api/src/lib.rs +++ b/common/monitoring_api/src/lib.rs @@ -10,7 +10,7 @@ pub use reqwest::{StatusCode, Url}; use sensitive_url::SensitiveUrl; use serde::{Deserialize, Serialize}; use task_executor::TaskExecutor; -use tokio::time::{interval_at, Instant}; +use tokio::time::{Instant, interval_at}; use tracing::{debug, error, info}; use types::*; diff --git a/common/sensitive_url/src/lib.rs b/common/sensitive_url/src/lib.rs index b6068a2dca6..64ad070a1fd 100644 --- a/common/sensitive_url/src/lib.rs +++ b/common/sensitive_url/src/lib.rs @@ -1,4 +1,4 @@ -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; +use serde::{Deserialize, Deserializer, Serialize, Serializer, de}; use std::fmt; use std::str::FromStr; use url::Url; diff --git a/common/slot_clock/src/lib.rs b/common/slot_clock/src/lib.rs index a742e29457d..e51bc3f6473 100644 --- a/common/slot_clock/src/lib.rs +++ b/common/slot_clock/src/lib.rs @@ -8,8 +8,8 @@ pub use crate::manual_slot_clock::ManualSlotClock as TestingSlotClock; pub use crate::manual_slot_clock::ManualSlotClock; pub use crate::system_time_slot_clock::SystemTimeSlotClock; pub use metrics::scrape_for_metrics; -use 
types::consts::bellatrix::INTERVALS_PER_SLOT; pub use types::Slot; +use types::consts::bellatrix::INTERVALS_PER_SLOT; /// A clock that reports the current slot. /// diff --git a/common/system_health/src/lib.rs b/common/system_health/src/lib.rs index 9f351e943bb..31b222c5407 100644 --- a/common/system_health/src/lib.rs +++ b/common/system_health/src/lib.rs @@ -1,4 +1,4 @@ -use lighthouse_network::{types::SyncState, NetworkGlobals}; +use lighthouse_network::{NetworkGlobals, types::SyncState}; use parking_lot::RwLock; use serde::{Deserialize, Serialize}; use std::path::{Path, PathBuf}; diff --git a/common/task_executor/src/lib.rs b/common/task_executor/src/lib.rs index 3b9e9c43210..5f0c822b03f 100644 --- a/common/task_executor/src/lib.rs +++ b/common/task_executor/src/lib.rs @@ -144,11 +144,11 @@ impl TaskExecutor { if let Some(handle) = self.handle() { let fut = async move { let timer = metrics::start_timer_vec(&metrics::TASKS_HISTOGRAM, &[name]); - if let Err(join_error) = task_handle.await { - if let Ok(_panic) = join_error.try_into_panic() { - let _ = shutdown_sender - .try_send(ShutdownReason::Failure("Panic (fatal error)")); - } + if let Err(join_error) = task_handle.await + && let Ok(_panic) = join_error.try_into_panic() + { + let _ = + shutdown_sender.try_send(ShutdownReason::Failure("Panic (fatal error)")); } drop(timer); }; @@ -282,7 +282,7 @@ impl TaskExecutor { &self, task: F, name: &'static str, - ) -> Option>> + ) -> Option> + Send + 'static + use> where F: FnOnce() -> R + Send + 'static, R: Send + 'static, @@ -367,7 +367,7 @@ impl TaskExecutor { /// Returns a future that completes when `async-channel::Sender` is dropped or () is sent, /// which translates to the exit signal being triggered. 
- pub fn exit(&self) -> impl Future { + pub fn exit(&self) -> impl Future + 'static { let exit = self.exit.clone(); async move { let _ = exit.recv().await; diff --git a/common/test_random_derive/src/lib.rs b/common/test_random_derive/src/lib.rs index 8c4b1ef7c35..3017936f1a1 100644 --- a/common/test_random_derive/src/lib.rs +++ b/common/test_random_derive/src/lib.rs @@ -1,6 +1,6 @@ use proc_macro::TokenStream; use quote::quote; -use syn::{parse_macro_input, DeriveInput}; +use syn::{DeriveInput, parse_macro_input}; /// Returns true if some field has an attribute declaring it should be generated from default (not /// randomized). @@ -27,7 +27,7 @@ pub fn test_random_derive(input: TokenStream) -> TokenStream { let mut quotes = vec![]; for field in &struct_data.fields { match &field.ident { - Some(ref ident) => { + Some(ident) => { if should_use_default(field) { quotes.push(quote! { #ident: <_>::default(), diff --git a/common/validator_dir/src/builder.rs b/common/validator_dir/src/builder.rs index 2e971a8b1ae..bae36789bb5 100644 --- a/common/validator_dir/src/builder.rs +++ b/common/validator_dir/src/builder.rs @@ -1,10 +1,10 @@ use crate::{Error as DirError, ValidatorDir}; use bls::get_withdrawal_credentials; -use deposit_contract::{encode_eth1_tx_data, Error as DepositError}; +use deposit_contract::{Error as DepositError, encode_eth1_tx_data}; use eth2_keystore::{Error as KeystoreError, Keystore, KeystoreBuilder, PlainText}; use filesystem::create_with_600_perms; -use rand::{distributions::Alphanumeric, Rng}; -use std::fs::{create_dir_all, File}; +use rand::{Rng, distr::Alphanumeric}; +use std::fs::{File, create_dir_all}; use std::io::{self, Write}; use std::path::{Path, PathBuf}; use types::{ChainSpec, DepositData, Hash256, Keypair, Signature}; @@ -314,7 +314,7 @@ pub fn write_password_to_file>(path: P, bytes: &[u8]) -> Result<( /// Generates a random keystore with a random password. 
fn random_keystore() -> Result<(Keystore, PlainText), Error> { let keypair = Keypair::random(); - let password: PlainText = rand::thread_rng() + let password: PlainText = rand::rng() .sample_iter(&Alphanumeric) .take(DEFAULT_PASSWORD_LEN) .map(char::from) diff --git a/common/validator_dir/src/insecure_keys.rs b/common/validator_dir/src/insecure_keys.rs index 83720bb58cd..7e48a134544 100644 --- a/common/validator_dir/src/insecure_keys.rs +++ b/common/validator_dir/src/insecure_keys.rs @@ -6,8 +6,8 @@ use crate::{Builder, BuilderError}; use eth2_keystore::{ + DKLEN, Keystore, KeystoreBuilder, PlainText, json_keystore::{Kdf, Scrypt}, - Keystore, KeystoreBuilder, PlainText, DKLEN, }; use std::path::PathBuf; use types::test_utils::generate_deterministic_keypair; diff --git a/common/validator_dir/src/lib.rs b/common/validator_dir/src/lib.rs index c21b0b44cf7..7c1f0721e2a 100644 --- a/common/validator_dir/src/lib.rs +++ b/common/validator_dir/src/lib.rs @@ -11,10 +11,10 @@ pub mod insecure_keys; mod validator_dir; pub use crate::validator_dir::{ - unlock_keypair_from_password_path, Error, Eth1DepositData, ValidatorDir, - ETH1_DEPOSIT_TX_HASH_FILE, + ETH1_DEPOSIT_TX_HASH_FILE, Error, Eth1DepositData, ValidatorDir, + unlock_keypair_from_password_path, }; pub use builder::{ - keystore_password_path, Builder, Error as BuilderError, ETH1_DEPOSIT_DATA_FILE, - VOTING_KEYSTORE_FILE, WITHDRAWAL_KEYSTORE_FILE, + Builder, ETH1_DEPOSIT_DATA_FILE, Error as BuilderError, VOTING_KEYSTORE_FILE, + WITHDRAWAL_KEYSTORE_FILE, keystore_password_path, }; diff --git a/common/validator_dir/src/validator_dir.rs b/common/validator_dir/src/validator_dir.rs index 4f9b786844d..0ed28c4ddc2 100644 --- a/common/validator_dir/src/validator_dir.rs +++ b/common/validator_dir/src/validator_dir.rs @@ -1,12 +1,12 @@ use crate::builder::{ - keystore_password_path, ETH1_DEPOSIT_AMOUNT_FILE, ETH1_DEPOSIT_DATA_FILE, VOTING_KEYSTORE_FILE, - WITHDRAWAL_KEYSTORE_FILE, + ETH1_DEPOSIT_AMOUNT_FILE, 
ETH1_DEPOSIT_DATA_FILE, VOTING_KEYSTORE_FILE, + WITHDRAWAL_KEYSTORE_FILE, keystore_password_path, }; use deposit_contract::decode_eth1_tx_data; use derivative::Derivative; use eth2_keystore::{Error as KeystoreError, Keystore, PlainText}; use lockfile::{Lockfile, LockfileError}; -use std::fs::{read, write, File}; +use std::fs::{File, read, write}; use std::io; use std::path::{Path, PathBuf}; use tree_hash::TreeHash; diff --git a/common/validator_dir/tests/tests.rs b/common/validator_dir/tests/tests.rs index a782d81bbe0..7d9730ebd37 100644 --- a/common/validator_dir/tests/tests.rs +++ b/common/validator_dir/tests/tests.rs @@ -3,11 +3,11 @@ use eth2_keystore::{Keystore, KeystoreBuilder, PlainText}; use std::fs::{self, File}; use std::path::Path; -use tempfile::{tempdir, TempDir}; -use types::{test_utils::generate_deterministic_keypair, EthSpec, Keypair, MainnetEthSpec}; +use tempfile::{TempDir, tempdir}; +use types::{EthSpec, Keypair, MainnetEthSpec, test_utils::generate_deterministic_keypair}; use validator_dir::{ - Builder, BuilderError, ValidatorDir, ETH1_DEPOSIT_DATA_FILE, ETH1_DEPOSIT_TX_HASH_FILE, - VOTING_KEYSTORE_FILE, WITHDRAWAL_KEYSTORE_FILE, + Builder, BuilderError, ETH1_DEPOSIT_DATA_FILE, ETH1_DEPOSIT_TX_HASH_FILE, VOTING_KEYSTORE_FILE, + ValidatorDir, WITHDRAWAL_KEYSTORE_FILE, }; /// A very weak password with which to encrypt the keystores. 
diff --git a/common/warp_utils/src/json.rs b/common/warp_utils/src/json.rs index bc7d61557b9..b3f066194d6 100644 --- a/common/warp_utils/src/json.rs +++ b/common/warp_utils/src/json.rs @@ -20,12 +20,12 @@ pub fn json() -> impl Filter(CONTENT_TYPE_HEADER) .and(warp::body::bytes()) .and_then(|header: Option, bytes: Bytes| async move { - if let Some(header) = header { - if header == SSZ_CONTENT_TYPE_HEADER { - return Err(reject::unsupported_media_type( - "The request's content-type is not supported".to_string(), - )); - } + if let Some(header) = header + && header == SSZ_CONTENT_TYPE_HEADER + { + return Err(reject::unsupported_media_type( + "The request's content-type is not supported".to_string(), + )); } Json::decode(bytes) .map_err(|err| reject::custom_deserialize_error(format!("{:?}", err))) @@ -33,17 +33,17 @@ pub fn json() -> impl Filter( -) -> impl Filter + Copy { +pub fn json_no_body() +-> impl Filter + Copy { warp::header::optional::(CONTENT_TYPE_HEADER) .and(warp::body::bytes()) .and_then(|header: Option, bytes: Bytes| async move { - if let Some(header) = header { - if header == SSZ_CONTENT_TYPE_HEADER { - return Err(reject::unsupported_media_type( - "The request's content-type is not supported".to_string(), - )); - } + if let Some(header) = header + && header == SSZ_CONTENT_TYPE_HEADER + { + return Err(reject::unsupported_media_type( + "The request's content-type is not supported".to_string(), + )); } // Handle the case when the HTTP request has no body, i.e., without the -d header diff --git a/common/warp_utils/src/query.rs b/common/warp_utils/src/query.rs index c5ed5c5f128..8121a90139a 100644 --- a/common/warp_utils/src/query.rs +++ b/common/warp_utils/src/query.rs @@ -4,8 +4,8 @@ use warp::Filter; // Custom query filter using `serde_array_query`. // This allows duplicate keys inside query strings. 
-pub fn multi_key_query<'de, T: Deserialize<'de>>( -) -> impl warp::Filter,), Error = std::convert::Infallible> + Copy +pub fn multi_key_query<'de, T: Deserialize<'de>>() +-> impl warp::Filter,), Error = std::convert::Infallible> + Copy { raw_query().then(|query_str: String| async move { serde_array_query::from_str(&query_str).map_err(|e| custom_bad_request(e.to_string())) diff --git a/common/warp_utils/src/reject.rs b/common/warp_utils/src/reject.rs index a433f5351ef..c4788709505 100644 --- a/common/warp_utils/src/reject.rs +++ b/common/warp_utils/src/reject.rs @@ -3,7 +3,7 @@ use std::convert::Infallible; use std::error::Error; use std::fmt; use std::fmt::Debug; -use warp::{http::StatusCode, reject::Reject, reply::Response, Reply}; +use warp::{Reply, http::StatusCode, reject::Reject, reply::Response}; #[derive(Debug)] pub struct ServerSentEventError(pub String); diff --git a/common/warp_utils/src/uor.rs b/common/warp_utils/src/uor.rs index 363f1df7d4d..c5f421693b5 100644 --- a/common/warp_utils/src/uor.rs +++ b/common/warp_utils/src/uor.rs @@ -1,4 +1,4 @@ -use warp::{filters::BoxedFilter, Filter, Rejection}; +use warp::{Filter, Rejection, filters::BoxedFilter}; /// Mixin trait for `Filter` providing the unifying-or method. 
pub trait UnifyingOrFilter: Filter + Sized + Send + Sync + 'static diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index a894f26ebe5..7bd8da4cbd6 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -15,10 +15,10 @@ use std::marker::PhantomData; use std::time::Duration; use tracing::{debug, instrument, warn}; use types::{ - consts::bellatrix::INTERVALS_PER_SLOT, AbstractExecPayload, AttestationShufflingId, - AttesterSlashingRef, BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, Checkpoint, - Epoch, EthSpec, ExecPayload, ExecutionBlockHash, FixedBytesExtended, Hash256, - IndexedAttestationRef, RelativeEpoch, SignedBeaconBlock, Slot, + AbstractExecPayload, AttestationShufflingId, AttesterSlashingRef, BeaconBlockRef, BeaconState, + BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, + FixedBytesExtended, Hash256, IndexedAttestationRef, RelativeEpoch, SignedBeaconBlock, Slot, + consts::bellatrix::INTERVALS_PER_SLOT, }; #[derive(Debug)] @@ -849,7 +849,7 @@ where block_slot: block.slot(), block_root, payload_verification_status, - }) + }); } } } diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 8d510d0e896..25c3f03d3b9 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -17,9 +17,9 @@ use std::time::Duration; use store::MemoryStore; use types::SingleAttestation; use types::{ - test_utils::generate_deterministic_keypair, BeaconBlockRef, BeaconState, ChainSpec, Checkpoint, - Epoch, EthSpec, FixedBytesExtended, ForkName, Hash256, IndexedAttestation, MainnetEthSpec, - RelativeEpoch, SignedBeaconBlock, Slot, SubnetId, + BeaconBlockRef, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, FixedBytesExtended, + ForkName, Hash256, IndexedAttestation, MainnetEthSpec, RelativeEpoch, SignedBeaconBlock, Slot, + SubnetId, 
test_utils::generate_deterministic_keypair, }; pub type E = MainnetEthSpec; @@ -752,10 +752,10 @@ async fn invalid_attestation_empty_bitfield() { .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, _| match attestation { - IndexedAttestation::Base(ref mut att) => { + IndexedAttestation::Base(att) => { att.attesting_indices = vec![].into(); } - IndexedAttestation::Electra(ref mut att) => { + IndexedAttestation::Electra(att) => { att.attesting_indices = vec![].into(); } }, diff --git a/consensus/merkle_proof/src/lib.rs b/consensus/merkle_proof/src/lib.rs index 271e676df1c..bf075ec15a5 100644 --- a/consensus/merkle_proof/src/lib.rs +++ b/consensus/merkle_proof/src/lib.rs @@ -1,4 +1,4 @@ -use ethereum_hashing::{hash, hash32_concat, ZERO_HASHES}; +use ethereum_hashing::{ZERO_HASHES, hash, hash32_concat}; use safe_arith::ArithError; use std::sync::LazyLock; @@ -113,13 +113,13 @@ impl MerkleTree { Zero(_) => { *self = MerkleTree::create(&[elem], depth); } - Node(ref mut hash, ref mut left, ref mut right) => { + Node(hash, left, right) => { let left: &mut MerkleTree = &mut *left; let right: &mut MerkleTree = &mut *right; match (&*left, &*right) { // Tree is full (Leaf(_), Leaf(_)) | (Finalized(_), Leaf(_)) => { - return Err(MerkleTreeError::MerkleTreeFull) + return Err(MerkleTreeError::MerkleTreeFull); } // There is a right node so insert in right node (Node(_, _, _), Node(_, _, _)) | (Finalized(_), Node(_, _, _)) => { diff --git a/consensus/proto_array/src/lib.rs b/consensus/proto_array/src/lib.rs index b05a55e6862..4581e5b78fa 100644 --- a/consensus/proto_array/src/lib.rs +++ b/consensus/proto_array/src/lib.rs @@ -6,7 +6,7 @@ mod proto_array_fork_choice; mod ssz_container; pub use crate::justified_balances::JustifiedBalances; -pub use crate::proto_array::{calculate_committee_fraction, InvalidationOperation}; +pub use crate::proto_array::{InvalidationOperation, calculate_committee_fraction}; pub use crate::proto_array_fork_choice::{ Block, 
DisallowedReOrgOffsets, DoNotReOrg, ExecutionStatus, ProposerHeadError, ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold, diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index cbae54bd362..18af2dfc24c 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1,8 +1,8 @@ use crate::error::InvalidBestNodeInfo; -use crate::{error::Error, Block, ExecutionStatus, JustifiedBalances}; +use crate::{Block, ExecutionStatus, JustifiedBalances, error::Error}; use serde::{Deserialize, Serialize}; -use ssz::four_byte_option_impl; use ssz::Encode; +use ssz::four_byte_option_impl; use ssz_derive::{Decode, Encode}; use std::collections::{HashMap, HashSet}; use superstruct::superstruct; @@ -223,21 +223,18 @@ impl ProtoArray { // the delta by the new score amount (unless the block has an invalid execution status). // // https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#get_latest_attesting_balance - if let Some(proposer_score_boost) = spec.proposer_score_boost { - if proposer_boost_root != Hash256::zero() + if let Some(proposer_score_boost) = spec.proposer_score_boost + && proposer_boost_root != Hash256::zero() && proposer_boost_root == node.root // Invalid nodes (or their ancestors) should not receive a proposer boost. && !execution_status_is_invalid - { - proposer_score = calculate_committee_fraction::( - new_justified_balances, - proposer_score_boost, - ) - .ok_or(Error::ProposerBoostOverflow(node_index))?; - node_delta = node_delta - .checked_add(proposer_score as i64) - .ok_or(Error::DeltaOverflow(node_index))?; - } + { + proposer_score = + calculate_committee_fraction::(new_justified_balances, proposer_score_boost) + .ok_or(Error::ProposerBoostOverflow(node_index))?; + node_delta = node_delta + .checked_add(proposer_score as i64) + .ok_or(Error::DeltaOverflow(node_index))?; } // Apply the delta to the node. 
@@ -428,7 +425,7 @@ impl ProtoArray { return Err(Error::InvalidAncestorOfValidPayload { ancestor_block_root: node.root, ancestor_payload_block_hash, - }) + }); } }; @@ -537,7 +534,7 @@ impl ProtoArray { return Err(Error::ValidExecutionStatusBecameInvalid { block_root: node.root, payload_block_hash: *hash, - }) + }); } ExecutionStatus::Optimistic(hash) => { invalidated_indices.insert(index); @@ -594,27 +591,27 @@ impl ProtoArray { .get_mut(index) .ok_or(Error::InvalidNodeIndex(index))?; - if let Some(parent_index) = node.parent { - if invalidated_indices.contains(&parent_index) { - match &node.execution_status { - ExecutionStatus::Valid(hash) => { - return Err(Error::ValidExecutionStatusBecameInvalid { - block_root: node.root, - payload_block_hash: *hash, - }) - } - ExecutionStatus::Optimistic(hash) | ExecutionStatus::Invalid(hash) => { - node.execution_status = ExecutionStatus::Invalid(*hash) - } - ExecutionStatus::Irrelevant(_) => { - return Err(Error::IrrelevantDescendant { - block_root: node.root, - }) - } + if let Some(parent_index) = node.parent + && invalidated_indices.contains(&parent_index) + { + match &node.execution_status { + ExecutionStatus::Valid(hash) => { + return Err(Error::ValidExecutionStatusBecameInvalid { + block_root: node.root, + payload_block_hash: *hash, + }); + } + ExecutionStatus::Optimistic(hash) | ExecutionStatus::Invalid(hash) => { + node.execution_status = ExecutionStatus::Invalid(*hash) + } + ExecutionStatus::Irrelevant(_) => { + return Err(Error::IrrelevantDescendant { + block_root: node.root, + }); } - - invalidated_indices.insert(index); } + + invalidated_indices.insert(index); } } diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 76a07ac6bec..ecefb9ff518 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -1,11 +1,11 @@ use crate::{ + JustifiedBalances, error::Error, 
proto_array::{ - calculate_committee_fraction, InvalidationOperation, Iter, ProposerBoost, ProtoArray, - ProtoNode, + InvalidationOperation, Iter, ProposerBoost, ProtoArray, ProtoNode, + calculate_committee_fraction, }, ssz_container::SszContainer, - JustifiedBalances, }; use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; @@ -705,24 +705,22 @@ impl ProtoArrayForkChoice { // If the invalid root was boosted, apply the weight to it and // ancestors. - if let Some(proposer_score_boost) = spec.proposer_score_boost { - if self.proto_array.previous_proposer_boost.root == node.root { - // Compute the score based upon the current balances. We can't rely on - // the `previous_proposr_boost.score` since it is set to zero with an - // invalid node. - let proposer_score = calculate_committee_fraction::( - &self.balances, - proposer_score_boost, - ) - .ok_or("Failed to compute proposer boost")?; - // Store the score we've applied here so it can be removed in - // a later call to `apply_score_changes`. - self.proto_array.previous_proposer_boost.score = proposer_score; - // Apply this boost to this node. - restored_weight = restored_weight - .checked_add(proposer_score) - .ok_or("Overflow when adding boost to weight")?; - } + if let Some(proposer_score_boost) = spec.proposer_score_boost + && self.proto_array.previous_proposer_boost.root == node.root + { + // Compute the score based upon the current balances. We can't rely on + // the `previous_proposer_boost.score` since it is set to zero with an + // invalid node. + let proposer_score = + calculate_committee_fraction::(&self.balances, proposer_score_boost) + .ok_or("Failed to compute proposer boost")?; + // Store the score we've applied here so it can be removed in + // a later call to `apply_score_changes`. + self.proto_array.previous_proposer_boost.score = proposer_score; + // Apply this boost to this node. 
+ restored_weight = restored_weight + .checked_add(proposer_score) + .ok_or("Overflow when adding boost to weight")?; } // Add the restored weight to the node and all ancestors. @@ -864,7 +862,7 @@ impl ProtoArrayForkChoice { pub fn iter_block_roots( &self, block_root: &Hash256, - ) -> impl Iterator + use<'_> { + ) -> impl Iterator + '_ { self.proto_array.iter_block_roots(block_root) } diff --git a/consensus/proto_array/src/ssz_container.rs b/consensus/proto_array/src/ssz_container.rs index 8abb60d8e6a..c13c6a0d59e 100644 --- a/consensus/proto_array/src/ssz_container.rs +++ b/consensus/proto_array/src/ssz_container.rs @@ -1,10 +1,10 @@ use crate::proto_array::ProposerBoost; use crate::{ + Error, JustifiedBalances, proto_array::{ProtoArray, ProtoNodeV17}, proto_array_fork_choice::{ElasticList, ProtoArrayForkChoice, VoteTracker}, - Error, JustifiedBalances, }; -use ssz::{four_byte_option_impl, Encode}; +use ssz::{Encode, four_byte_option_impl}; use ssz_derive::{Decode, Encode}; use std::collections::HashMap; use superstruct::superstruct; diff --git a/consensus/state_processing/src/block_replayer.rs b/consensus/state_processing/src/block_replayer.rs index 0cdb2a2beda..56e667cdd37 100644 --- a/consensus/state_processing/src/block_replayer.rs +++ b/consensus/state_processing/src/block_replayer.rs @@ -1,7 +1,7 @@ use crate::{ - per_block_processing, per_epoch_processing::EpochProcessingSummary, per_slot_processing, BlockProcessingError, BlockSignatureStrategy, ConsensusContext, SlotProcessingError, - VerifyBlockRoot, + VerifyBlockRoot, per_block_processing, per_epoch_processing::EpochProcessingSummary, + per_slot_processing, }; use itertools::Itertools; use std::iter::Peekable; @@ -193,12 +193,11 @@ where } // Otherwise try to source a root from the previous block. 
- if let Some(prev_i) = i.checked_sub(1) { - if let Some(prev_block) = blocks.get(prev_i) { - if prev_block.slot() == slot { - return Ok(prev_block.state_root()); - } - } + if let Some(prev_i) = i.checked_sub(1) + && let Some(prev_block) = blocks.get(prev_i) + && prev_block.slot() == slot + { + return Ok(prev_block.state_root()); } self.state_root_miss = true; diff --git a/consensus/state_processing/src/common/get_attestation_participation.rs b/consensus/state_processing/src/common/get_attestation_participation.rs index 2c6fd3b215f..71bf6329f11 100644 --- a/consensus/state_processing/src/common/get_attestation_participation.rs +++ b/consensus/state_processing/src/common/get_attestation_participation.rs @@ -1,13 +1,13 @@ use integer_sqrt::IntegerSquareRoot; use smallvec::SmallVec; +use types::{AttestationData, BeaconState, ChainSpec, EthSpec}; use types::{ + BeaconStateError as Error, consts::altair::{ NUM_FLAG_INDICES, TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX, }, - BeaconStateError as Error, }; -use types::{AttestationData, BeaconState, ChainSpec, EthSpec}; /// Get the participation flags for a valid attestation. 
/// diff --git a/consensus/state_processing/src/common/get_attesting_indices.rs b/consensus/state_processing/src/common/get_attesting_indices.rs index 842adce431c..e4f5aa3c8bc 100644 --- a/consensus/state_processing/src/common/get_attesting_indices.rs +++ b/consensus/state_processing/src/common/get_attesting_indices.rs @@ -118,10 +118,10 @@ pub mod attesting_indices_electra { .iter() .enumerate() .filter_map(|(i, &index)| { - if let Ok(aggregation_bit_index) = committee_offset.safe_add(i) { - if aggregation_bits.get(aggregation_bit_index).unwrap_or(false) { - return Some(index as u64); - } + if let Ok(aggregation_bit_index) = committee_offset.safe_add(i) + && aggregation_bits.get(aggregation_bit_index).unwrap_or(false) + { + return Some(index as u64); } None }) diff --git a/consensus/state_processing/src/common/slash_validator.rs b/consensus/state_processing/src/common/slash_validator.rs index bd60f16014c..52f360849e0 100644 --- a/consensus/state_processing/src/common/slash_validator.rs +++ b/consensus/state_processing/src/common/slash_validator.rs @@ -1,8 +1,8 @@ use crate::common::update_progressive_balances_cache::update_progressive_balances_on_slashing; use crate::{ + ConsensusContext, common::{decrease_balance, increase_balance, initiate_validator_exit}, per_block_processing::errors::BlockProcessingError, - ConsensusContext, }; use safe_arith::SafeArith; use std::cmp; diff --git a/consensus/state_processing/src/common/update_progressive_balances_cache.rs b/consensus/state_processing/src/common/update_progressive_balances_cache.rs index f34ee83e6d6..24a5db20258 100644 --- a/consensus/state_processing/src/common/update_progressive_balances_cache.rs +++ b/consensus/state_processing/src/common/update_progressive_balances_cache.rs @@ -7,8 +7,8 @@ use crate::{BlockProcessingError, EpochProcessingError}; use metrics::set_gauge; use tracing::instrument; use types::{ - is_progressive_balances_enabled, BeaconState, BeaconStateError, ChainSpec, Epoch, - 
EpochTotalBalances, EthSpec, ParticipationFlags, ProgressiveBalancesCache, Validator, + BeaconState, BeaconStateError, ChainSpec, Epoch, EpochTotalBalances, EthSpec, + ParticipationFlags, ProgressiveBalancesCache, Validator, is_progressive_balances_enabled, }; /// Initializes the `ProgressiveBalancesCache` if it is unbuilt. diff --git a/consensus/state_processing/src/consensus_context.rs b/consensus/state_processing/src/consensus_context.rs index d0086c1041f..07d554e3037 100644 --- a/consensus/state_processing/src/consensus_context.rs +++ b/consensus/state_processing/src/consensus_context.rs @@ -1,7 +1,7 @@ +use crate::EpochCacheError; use crate::common::{attesting_indices_base, attesting_indices_electra}; use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError}; -use crate::EpochCacheError; -use std::collections::{hash_map::Entry, HashMap}; +use std::collections::{HashMap, hash_map::Entry}; use tree_hash::TreeHash; use types::{ AbstractExecPayload, AttestationRef, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, diff --git a/consensus/state_processing/src/lib.rs b/consensus/state_processing/src/lib.rs index adabf6862d3..9b2696c6d59 100644 --- a/consensus/state_processing/src/lib.rs +++ b/consensus/state_processing/src/lib.rs @@ -37,12 +37,12 @@ pub use genesis::{ process_activations, }; pub use per_block_processing::{ - block_signature_verifier, errors::BlockProcessingError, per_block_processing, signature_sets, BlockSignatureStrategy, BlockSignatureVerifier, VerifyBlockRoot, VerifySignatures, + block_signature_verifier, errors::BlockProcessingError, per_block_processing, signature_sets, }; pub use per_epoch_processing::{ errors::EpochProcessingError, process_epoch as per_epoch_processing, }; -pub use per_slot_processing::{per_slot_processing, Error as SlotProcessingError}; +pub use per_slot_processing::{Error as SlotProcessingError, per_slot_processing}; pub use types::{EpochCache, EpochCacheError, EpochCacheKey}; pub use 
verify_operation::{SigVerifiedOp, TransformPersist, VerifyOperation, VerifyOperationAt}; diff --git a/consensus/state_processing/src/metrics.rs b/consensus/state_processing/src/metrics.rs index 8772dbd4f84..65690ae30ae 100644 --- a/consensus/state_processing/src/metrics.rs +++ b/consensus/state_processing/src/metrics.rs @@ -7,23 +7,23 @@ use std::sync::LazyLock; pub static PARTICIPATION_PREV_EPOCH_HEAD_ATTESTING_GWEI_TOTAL: LazyLock> = LazyLock::new(|| { try_create_int_gauge( - "beacon_participation_prev_epoch_head_attesting_gwei_total", - "Total effective balance (gwei) of validators who attested to the head in the previous epoch" - ) + "beacon_participation_prev_epoch_head_attesting_gwei_total", + "Total effective balance (gwei) of validators who attested to the head in the previous epoch", + ) }); pub static PARTICIPATION_PREV_EPOCH_TARGET_ATTESTING_GWEI_TOTAL: LazyLock> = LazyLock::new(|| { try_create_int_gauge( - "beacon_participation_prev_epoch_target_attesting_gwei_total", - "Total effective balance (gwei) of validators who attested to the target in the previous epoch" - ) + "beacon_participation_prev_epoch_target_attesting_gwei_total", + "Total effective balance (gwei) of validators who attested to the target in the previous epoch", + ) }); pub static PARTICIPATION_PREV_EPOCH_SOURCE_ATTESTING_GWEI_TOTAL: LazyLock> = LazyLock::new(|| { try_create_int_gauge( - "beacon_participation_prev_epoch_source_attesting_gwei_total", - "Total effective balance (gwei) of validators who attested to the source in the previous epoch" - ) + "beacon_participation_prev_epoch_source_attesting_gwei_total", + "Total effective balance (gwei) of validators who attested to the source in the previous epoch", + ) }); pub static PARTICIPATION_CURRENT_EPOCH_TOTAL_ACTIVE_GWEI_TOTAL: LazyLock> = LazyLock::new(|| { @@ -63,7 +63,7 @@ pub static PARTICIPATION_PREV_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL: Laz > = LazyLock::new(|| { try_create_int_gauge( 
"beacon_participation_prev_epoch_target_attesting_gwei_progressive_total", - "Progressive total effective balance (gwei) of validators who attested to the target in the previous epoch" + "Progressive total effective balance (gwei) of validators who attested to the target in the previous epoch", ) }); pub static PARTICIPATION_CURR_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL: LazyLock< @@ -71,6 +71,6 @@ pub static PARTICIPATION_CURR_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL: Laz > = LazyLock::new(|| { try_create_int_gauge( "beacon_participation_curr_epoch_target_attesting_gwei_progressive_total", - "Progressive total effective balance (gwei) of validators who attested to the target in the current epoch" + "Progressive total effective balance (gwei) of validators who attested to the target in the current epoch", ) }); diff --git a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs index 08cfd9cba84..1219c7df442 100644 --- a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs +++ b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs @@ -1,6 +1,6 @@ use crate::common::{altair::BaseRewardPerIncrement, decrease_balance, increase_balance}; use crate::per_block_processing::errors::{BlockProcessingError, SyncAggregateInvalid}; -use crate::{signature_sets::sync_aggregate_signature_set, VerifySignatures}; +use crate::{VerifySignatures, signature_sets::sync_aggregate_signature_set}; use safe_arith::SafeArith; use std::borrow::Cow; use types::consts::altair::{PROPOSER_WEIGHT, SYNC_REWARD_WEIGHT, WEIGHT_DENOMINATOR}; diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs index 8d4a5441967..9aa44137d8e 100644 --- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs +++ 
b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -3,7 +3,7 @@ use super::signature_sets::{Error as SignatureSetError, *}; use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError}; use crate::{ConsensusContext, ContextError}; -use bls::{verify_signature_sets, PublicKey, PublicKeyBytes, SignatureSet}; +use bls::{PublicKey, PublicKeyBytes, SignatureSet, verify_signature_sets}; use std::borrow::Cow; use types::{ AbstractExecPayload, BeaconState, BeaconStateError, ChainSpec, EthSpec, Hash256, @@ -324,17 +324,17 @@ where &mut self, block: &'a SignedBeaconBlock, ) -> Result<()> { - if let Ok(sync_aggregate) = block.message().body().sync_aggregate() { - if let Some(signature_set) = sync_aggregate_signature_set( + if let Ok(sync_aggregate) = block.message().body().sync_aggregate() + && let Some(signature_set) = sync_aggregate_signature_set( &self.decompressor, sync_aggregate, block.slot(), block.parent_root(), self.state, self.spec, - )? { - self.sets.push(signature_set); - } + )? 
+ { + self.sets.push(signature_set); } Ok(()) } diff --git a/consensus/state_processing/src/per_block_processing/deneb.rs b/consensus/state_processing/src/per_block_processing/deneb.rs index 217c2ea30b6..a57c080c03b 100644 --- a/consensus/state_processing/src/per_block_processing/deneb.rs +++ b/consensus/state_processing/src/per_block_processing/deneb.rs @@ -1,5 +1,5 @@ use ethereum_hashing::hash_fixed; -use types::{KzgCommitment, VersionedHash, VERSIONED_HASH_VERSION_KZG}; +use types::{KzgCommitment, VERSIONED_HASH_VERSION_KZG, VersionedHash}; pub fn kzg_commitment_to_versioned_hash(kzg_commitment: &KzgCommitment) -> VersionedHash { let mut hashed_commitment = hash_fixed(&kzg_commitment.0); diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 82dd6167241..9a1c6c2f6ad 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -1,10 +1,10 @@ use super::*; +use crate::VerifySignatures; use crate::common::{ get_attestation_participation_flag_indices, increase_balance, initiate_validator_exit, slash_validator, }; use crate::per_block_processing::errors::{BlockProcessingError, IntoWithIndex}; -use crate::VerifySignatures; use types::consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}; use types::typenum::U33; diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index 526602600e0..183063ac762 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -5,10 +5,10 @@ use crate::per_block_processing::errors::{ DepositInvalid, HeaderInvalid, IndexedAttestationInvalid, IntoWithIndex, ProposerSlashingInvalid, }; -use crate::{per_block_processing, 
BlockReplayError, BlockReplayer}; +use crate::{BlockReplayError, BlockReplayer, per_block_processing}; use crate::{ - per_block_processing::{process_operations, verify_exit::verify_exit}, BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, VerifySignatures, + per_block_processing::{process_operations, verify_exit::verify_exit}, }; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use ssz_types::Bitfield; @@ -717,10 +717,10 @@ async fn invalid_attester_slashing_not_slashable() { let mut attester_slashing = harness.make_attester_slashing(vec![1, 2]); match &mut attester_slashing { - AttesterSlashing::Base(ref mut attester_slashing) => { + AttesterSlashing::Base(attester_slashing) => { attester_slashing.attestation_1 = attester_slashing.attestation_2.clone(); } - AttesterSlashing::Electra(ref mut attester_slashing) => { + AttesterSlashing::Electra(attester_slashing) => { attester_slashing.attestation_1 = attester_slashing.attestation_2.clone(); } } @@ -752,10 +752,10 @@ async fn invalid_attester_slashing_1_invalid() { let mut attester_slashing = harness.make_attester_slashing(vec![1, 2]); match &mut attester_slashing { - AttesterSlashing::Base(ref mut attester_slashing) => { + AttesterSlashing::Base(attester_slashing) => { attester_slashing.attestation_1.attesting_indices = VariableList::from(vec![2, 1]); } - AttesterSlashing::Electra(ref mut attester_slashing) => { + AttesterSlashing::Electra(attester_slashing) => { attester_slashing.attestation_1.attesting_indices = VariableList::from(vec![2, 1]); } } @@ -790,10 +790,10 @@ async fn invalid_attester_slashing_2_invalid() { let mut attester_slashing = harness.make_attester_slashing(vec![1, 2]); match &mut attester_slashing { - AttesterSlashing::Base(ref mut attester_slashing) => { + AttesterSlashing::Base(attester_slashing) => { attester_slashing.attestation_2.attesting_indices = VariableList::from(vec![2, 1]); } - AttesterSlashing::Electra(ref mut attester_slashing) => { + 
AttesterSlashing::Electra(attester_slashing) => { attester_slashing.attestation_2.attesting_indices = VariableList::from(vec![2, 1]); } } diff --git a/consensus/state_processing/src/per_block_processing/verify_attestation.rs b/consensus/state_processing/src/per_block_processing/verify_attestation.rs index 6b4a394c731..0d1fd17768e 100644 --- a/consensus/state_processing/src/per_block_processing/verify_attestation.rs +++ b/consensus/state_processing/src/per_block_processing/verify_attestation.rs @@ -1,7 +1,7 @@ -use super::errors::{AttestationInvalid as Invalid, BlockOperationError}; use super::VerifySignatures; -use crate::per_block_processing::is_valid_indexed_attestation; +use super::errors::{AttestationInvalid as Invalid, BlockOperationError}; use crate::ConsensusContext; +use crate::per_block_processing::is_valid_indexed_attestation; use safe_arith::SafeArith; use types::*; diff --git a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs index 24024fa8990..a1dd4f8eac2 100644 --- a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs +++ b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs @@ -1,6 +1,6 @@ use super::errors::{BlockOperationError, BlsExecutionChangeInvalid as Invalid}; -use crate::per_block_processing::signature_sets::bls_execution_change_signature_set; use crate::VerifySignatures; +use crate::per_block_processing::signature_sets::bls_execution_change_signature_set; use ethereum_hashing::hash; use types::*; diff --git a/consensus/state_processing/src/per_block_processing/verify_exit.rs b/consensus/state_processing/src/per_block_processing/verify_exit.rs index dea17dbc0c4..bdf42dab982 100644 --- a/consensus/state_processing/src/per_block_processing/verify_exit.rs +++ b/consensus/state_processing/src/per_block_processing/verify_exit.rs @@ -1,7 +1,7 @@ use 
super::errors::{BlockOperationError, ExitInvalid}; use crate::per_block_processing::{ - signature_sets::{exit_signature_set, get_pubkey_from_state}, VerifySignatures, + signature_sets::{exit_signature_set, get_pubkey_from_state}, }; use safe_arith::SafeArith; use types::*; diff --git a/consensus/state_processing/src/per_epoch_processing/altair.rs b/consensus/state_processing/src/per_epoch_processing/altair.rs index dc4dbe7cbc7..d9e69647304 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair.rs @@ -3,7 +3,7 @@ use crate::common::update_progressive_balances_cache::{ initialize_progressive_balances_cache, update_progressive_balances_on_epoch_transition, }; use crate::epoch_cache::initialize_epoch_cache; -use crate::per_epoch_processing::single_pass::{process_epoch_single_pass, SinglePassConfig}; +use crate::per_epoch_processing::single_pass::{SinglePassConfig, process_epoch_single_pass}; use crate::per_epoch_processing::{ capella::process_historical_summaries_update, historical_roots_update::process_historical_roots_update, diff --git a/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs b/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs index 698e88b83f2..9e8a36b6d5c 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs @@ -1,5 +1,5 @@ -use crate::per_epoch_processing::single_pass::{process_epoch_single_pass, SinglePassConfig}; use crate::EpochProcessingError; +use crate::per_epoch_processing::single_pass::{SinglePassConfig, process_epoch_single_pass}; use types::beacon_state::BeaconState; use types::chain_spec::ChainSpec; use types::eth_spec::EthSpec; diff --git a/consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs 
b/consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs index 61b5c1ed5ab..7fb30fee1d0 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs @@ -1,6 +1,6 @@ use crate::per_epoch_processing::Error; use crate::per_epoch_processing::{ - weigh_justification_and_finalization, JustificationAndFinalizationState, + JustificationAndFinalizationState, weigh_justification_and_finalization, }; use safe_arith::SafeArith; use types::{BeaconState, EthSpec}; diff --git a/consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs b/consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs index fc55fb11144..5c08406eaef 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs @@ -1,8 +1,8 @@ use crate::EpochProcessingError; +use types::List; use types::beacon_state::BeaconState; use types::eth_spec::EthSpec; use types::participation_flags::ParticipationFlags; -use types::List; pub fn process_participation_flag_updates( state: &mut BeaconState, diff --git a/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs b/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs index c4059f94afc..dff445feb03 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs @@ -1,6 +1,6 @@ use crate::per_epoch_processing::{ - single_pass::{process_epoch_single_pass, SinglePassConfig}, Error, + single_pass::{SinglePassConfig, process_epoch_single_pass}, }; use types::consts::altair::PARTICIPATION_FLAG_WEIGHTS; use types::{BeaconState, 
ChainSpec, EthSpec}; diff --git a/consensus/state_processing/src/per_epoch_processing/base.rs b/consensus/state_processing/src/per_epoch_processing/base.rs index e468a8ddd6c..4f7451cae68 100644 --- a/consensus/state_processing/src/per_epoch_processing/base.rs +++ b/consensus/state_processing/src/per_epoch_processing/base.rs @@ -1,4 +1,4 @@ -use super::{process_registry_updates, process_slashings, EpochProcessingSummary, Error}; +use super::{EpochProcessingSummary, Error, process_registry_updates, process_slashings}; use crate::epoch_cache::initialize_epoch_cache; use crate::per_epoch_processing::{ effective_balance_updates::process_effective_balance_updates, diff --git a/consensus/state_processing/src/per_epoch_processing/base/justification_and_finalization.rs b/consensus/state_processing/src/per_epoch_processing/base/justification_and_finalization.rs index db64808a80a..1b34d515459 100644 --- a/consensus/state_processing/src/per_epoch_processing/base/justification_and_finalization.rs +++ b/consensus/state_processing/src/per_epoch_processing/base/justification_and_finalization.rs @@ -1,7 +1,7 @@ -use crate::per_epoch_processing::base::TotalBalances; use crate::per_epoch_processing::Error; +use crate::per_epoch_processing::base::TotalBalances; use crate::per_epoch_processing::{ - weigh_justification_and_finalization, JustificationAndFinalizationState, + JustificationAndFinalizationState, weigh_justification_and_finalization, }; use safe_arith::SafeArith; use types::{BeaconState, ChainSpec, EthSpec}; diff --git a/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs b/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs index a316c55bef3..e17caeb7ba9 100644 --- a/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs +++ b/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs @@ -1,10 +1,10 @@ use crate::common::{ - base::{get_base_reward, 
SqrtTotalActiveBalance}, + base::{SqrtTotalActiveBalance, get_base_reward}, decrease_balance, increase_balance, }; use crate::per_epoch_processing::{ - base::{TotalBalances, ValidatorStatus, ValidatorStatuses}, Delta, Error, + base::{TotalBalances, ValidatorStatus, ValidatorStatuses}, }; use safe_arith::SafeArith; use types::{BeaconState, ChainSpec, EthSpec}; @@ -190,16 +190,15 @@ fn get_attestation_deltas( .combine(inactivity_penalty_delta)?; } - if let ProposerRewardCalculation::Include = proposer_reward { - if let Some((proposer_index, proposer_delta)) = proposer_delta { - if include_validator_delta(proposer_index) { - deltas - .get_mut(proposer_index) - .ok_or(Error::ValidatorStatusesInconsistent)? - .inclusion_delay_delta - .combine(proposer_delta)?; - } - } + if let ProposerRewardCalculation::Include = proposer_reward + && let Some((proposer_index, proposer_delta)) = proposer_delta + && include_validator_delta(proposer_index) + { + deltas + .get_mut(proposer_index) + .ok_or(Error::ValidatorStatusesInconsistent)? 
+ .inclusion_delay_delta + .combine(proposer_delta)?; } } diff --git a/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs b/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs index 73881e932b7..8daad83a157 100644 --- a/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs @@ -1,5 +1,5 @@ use super::errors::EpochProcessingError; -use crate::per_epoch_processing::single_pass::{process_epoch_single_pass, SinglePassConfig}; +use crate::per_epoch_processing::single_pass::{SinglePassConfig, process_epoch_single_pass}; use safe_arith::SafeArith; use types::beacon_state::BeaconState; use types::chain_spec::ChainSpec; diff --git a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs index b2228a5a1d9..fd712cc8e50 100644 --- a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs +++ b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs @@ -1,10 +1,10 @@ -use super::base::{validator_statuses::InclusionInfo, TotalBalances, ValidatorStatus}; +use super::base::{TotalBalances, ValidatorStatus, validator_statuses::InclusionInfo}; use crate::metrics; use std::sync::Arc; use types::{ - consts::altair::{TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX}, BeaconStateError, Epoch, EthSpec, List, ParticipationFlags, ProgressiveBalancesCache, SyncCommittee, Validator, + consts::altair::{TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX}, }; /// Provides a summary of validator participation during the epoch. 
diff --git a/consensus/state_processing/src/per_epoch_processing/errors.rs b/consensus/state_processing/src/per_epoch_processing/errors.rs index 9db2ff30965..a5a2a69ebff 100644 --- a/consensus/state_processing/src/per_epoch_processing/errors.rs +++ b/consensus/state_processing/src/per_epoch_processing/errors.rs @@ -1,4 +1,4 @@ -use types::{milhouse, BeaconStateError, EpochCacheError, InconsistentFork}; +use types::{BeaconStateError, EpochCacheError, InconsistentFork, milhouse}; #[derive(Debug, PartialEq)] pub enum EpochProcessingError { diff --git a/consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs b/consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs index 7686932192f..8fcdda062c9 100644 --- a/consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs +++ b/consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs @@ -1,9 +1,9 @@ use super::errors::EpochProcessingError; use safe_arith::SafeArith; use tree_hash::TreeHash; +use types::Unsigned; use types::beacon_state::BeaconState; use types::eth_spec::EthSpec; -use types::Unsigned; pub fn process_historical_roots_update( state: &mut BeaconState, diff --git a/consensus/state_processing/src/per_epoch_processing/registry_updates.rs b/consensus/state_processing/src/per_epoch_processing/registry_updates.rs index 3d02d797366..91250ca30ca 100644 --- a/consensus/state_processing/src/per_epoch_processing/registry_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/registry_updates.rs @@ -1,4 +1,4 @@ -use crate::per_epoch_processing::single_pass::{process_epoch_single_pass, SinglePassConfig}; +use crate::per_epoch_processing::single_pass::{SinglePassConfig, process_epoch_single_pass}; use crate::{common::initiate_validator_exit, per_epoch_processing::Error}; use safe_arith::SafeArith; use types::{BeaconState, ChainSpec, EthSpec, Validator}; diff --git 
a/consensus/state_processing/src/per_epoch_processing/single_pass.rs b/consensus/state_processing/src/per_epoch_processing/single_pass.rs index e3c25fff074..1584e932bdf 100644 --- a/consensus/state_processing/src/per_epoch_processing/single_pass.rs +++ b/consensus/state_processing/src/per_epoch_processing/single_pass.rs @@ -3,7 +3,7 @@ use crate::{ decrease_balance, increase_balance, update_progressive_balances_cache::initialize_progressive_balances_cache, }, - epoch_cache::{initialize_epoch_cache, PreEpochCache}, + epoch_cache::{PreEpochCache, initialize_epoch_cache}, per_block_processing::is_valid_deposit_signature, per_epoch_processing::{Delta, Error, ParticipationEpochSummary}, }; @@ -13,14 +13,14 @@ use std::cmp::{max, min}; use std::collections::{BTreeSet, HashMap}; use tracing::instrument; use types::{ + ActivationQueue, BeaconState, BeaconStateError, ChainSpec, Checkpoint, DepositData, Epoch, + EthSpec, ExitCache, ForkName, List, ParticipationFlags, PendingDeposit, + ProgressiveBalancesCache, RelativeEpoch, Unsigned, Validator, Vector, consts::altair::{ NUM_FLAG_INDICES, PARTICIPATION_FLAG_WEIGHTS, TIMELY_HEAD_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX, WEIGHT_DENOMINATOR, }, milhouse::Cow, - ActivationQueue, BeaconState, BeaconStateError, ChainSpec, Checkpoint, DepositData, Epoch, - EthSpec, ExitCache, ForkName, List, ParticipationFlags, PendingDeposit, - ProgressiveBalancesCache, RelativeEpoch, Unsigned, Validator, Vector, }; pub struct SinglePassConfig { diff --git a/consensus/state_processing/src/per_epoch_processing/slashings.rs b/consensus/state_processing/src/per_epoch_processing/slashings.rs index 6104208ee65..47eb06e907a 100644 --- a/consensus/state_processing/src/per_epoch_processing/slashings.rs +++ b/consensus/state_processing/src/per_epoch_processing/slashings.rs @@ -1,7 +1,7 @@ use crate::common::decrease_balance; use crate::per_epoch_processing::{ - single_pass::{process_epoch_single_pass, SinglePassConfig}, Error, + single_pass::{SinglePassConfig, 
process_epoch_single_pass}, }; use safe_arith::{SafeArith, SafeArithIter}; use types::{BeaconState, ChainSpec, EthSpec, Unsigned}; diff --git a/consensus/state_processing/src/per_epoch_processing/tests.rs b/consensus/state_processing/src/per_epoch_processing/tests.rs index b93ede248ca..6dd3f316c13 100644 --- a/consensus/state_processing/src/per_epoch_processing/tests.rs +++ b/consensus/state_processing/src/per_epoch_processing/tests.rs @@ -42,7 +42,7 @@ async fn runs_without_error() { mod release_tests { use super::*; use crate::{ - per_slot_processing::per_slot_processing, EpochProcessingError, SlotProcessingError, + EpochProcessingError, SlotProcessingError, per_slot_processing::per_slot_processing, }; use beacon_chain::test_utils::{AttestationStrategy, BlockStrategy}; use std::sync::Arc; diff --git a/consensus/state_processing/src/verify_operation.rs b/consensus/state_processing/src/verify_operation.rs index 3b20c67b4d9..a922e47cfef 100644 --- a/consensus/state_processing/src/verify_operation.rs +++ b/consensus/state_processing/src/verify_operation.rs @@ -1,3 +1,4 @@ +use crate::VerifySignatures; use crate::per_block_processing::{ errors::{ AttesterSlashingValidationError, BlsExecutionChangeValidationError, ExitValidationError, @@ -6,18 +7,17 @@ use crate::per_block_processing::{ verify_attester_slashing, verify_bls_to_execution_change, verify_exit, verify_proposer_slashing, }; -use crate::VerifySignatures; use arbitrary::Arbitrary; use derivative::Derivative; -use smallvec::{smallvec, SmallVec}; +use smallvec::{SmallVec, smallvec}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use std::marker::PhantomData; use test_random_derive::TestRandom; use types::{ - test_utils::TestRandom, AttesterSlashing, AttesterSlashingBase, AttesterSlashingOnDisk, - AttesterSlashingRefOnDisk, BeaconState, ChainSpec, Epoch, EthSpec, Fork, ForkVersion, - ProposerSlashing, SignedBlsToExecutionChange, SignedVoluntaryExit, + AttesterSlashing, AttesterSlashingBase, 
AttesterSlashingOnDisk, AttesterSlashingRefOnDisk, + BeaconState, ChainSpec, Epoch, EthSpec, Fork, ForkVersion, ProposerSlashing, + SignedBlsToExecutionChange, SignedVoluntaryExit, test_utils::TestRandom, }; const MAX_FORKS_VERIFIED_AGAINST: usize = 2; @@ -260,11 +260,12 @@ impl VerifyOperation for ProposerSlashing { #[allow(clippy::arithmetic_side_effects)] fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { // Only need a single epoch because the slots of the two headers must be equal. - smallvec![self - .signed_header_1 - .message - .slot - .epoch(E::slots_per_epoch())] + smallvec![ + self.signed_header_1 + .message + .slot + .epoch(E::slots_per_epoch()) + ] } } @@ -417,8 +418,8 @@ impl TransformPersist for SignedBlsToExecutionChange { mod test { use super::*; use types::{ - test_utils::{SeedableRng, TestRandom, XorShiftRng}, MainnetEthSpec, + test_utils::{SeedableRng, TestRandom, XorShiftRng}, }; type E = MainnetEthSpec; diff --git a/consensus/swap_or_not_shuffle/benches/benches.rs b/consensus/swap_or_not_shuffle/benches/benches.rs index 2909ff1ac69..f33556be386 100644 --- a/consensus/swap_or_not_shuffle/benches/benches.rs +++ b/consensus/swap_or_not_shuffle/benches/benches.rs @@ -1,4 +1,4 @@ -use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; +use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; use swap_or_not_shuffle::{compute_shuffled_index, shuffle_list as fast_shuffle}; const SHUFFLE_ROUND_COUNT: u8 = 90; diff --git a/consensus/swap_or_not_shuffle/src/compute_shuffled_index.rs b/consensus/swap_or_not_shuffle/src/compute_shuffled_index.rs index a7f25ea65f7..199dd0ef3ad 100644 --- a/consensus/swap_or_not_shuffle/src/compute_shuffled_index.rs +++ b/consensus/swap_or_not_shuffle/src/compute_shuffled_index.rs @@ -46,11 +46,7 @@ fn do_round(seed: &[u8], index: usize, pivot: usize, round: u8, list_size: usize let source = hash_with_round_and_position(seed, round, 
position); let byte = source[(position % 256) / 8]; let bit = (byte >> (position % 8)) % 2; - if bit == 1 { - flip - } else { - index - } + if bit == 1 { flip } else { index } } fn hash_with_round_and_position(seed: &[u8], round: u8, position: usize) -> Hash256 { diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index aa0df818d4b..bfce4b72d22 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -48,7 +48,7 @@ metastruct = "0.1.0" milhouse = { workspace = true } parking_lot = { workspace = true } rand = { workspace = true } -rand_xorshift = "0.3.0" +rand_xorshift = "0.4.0" rayon = { workspace = true } regex = { workspace = true } rpds = { workspace = true } diff --git a/consensus/types/benches/benches.rs b/consensus/types/benches/benches.rs index 0c8bf36c813..814001d9660 100644 --- a/consensus/types/benches/benches.rs +++ b/consensus/types/benches/benches.rs @@ -1,11 +1,11 @@ -use criterion::{black_box, criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion}; +use criterion::{BatchSize, BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; use milhouse::List; use rayon::prelude::*; use ssz::Encode; use std::sync::Arc; use types::{ - test_utils::generate_deterministic_keypair, BeaconState, Epoch, Eth1Data, EthSpec, - FixedBytesExtended, Hash256, MainnetEthSpec, Validator, + BeaconState, Epoch, Eth1Data, EthSpec, FixedBytesExtended, Hash256, MainnetEthSpec, Validator, + test_utils::generate_deterministic_keypair, }; fn get_state(validator_count: usize) -> BeaconState { diff --git a/consensus/types/src/aggregate_and_proof.rs b/consensus/types/src/aggregate_and_proof.rs index 374fd3f0ffc..e76ba48bf47 100644 --- a/consensus/types/src/aggregate_and_proof.rs +++ b/consensus/types/src/aggregate_and_proof.rs @@ -3,9 +3,9 @@ use super::{ ChainSpec, Domain, EthSpec, Fork, ForkName, Hash256, PublicKey, SecretKey, SelectionProof, Signature, SignedRoot, }; +use crate::Attestation; use crate::context_deserialize; 
use crate::test_utils::TestRandom; -use crate::Attestation; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use superstruct::superstruct; diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index 85d442bff1e..860f0d0a2d3 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation.rs @@ -3,11 +3,11 @@ use super::{ Signature, SignedRoot, }; use crate::slot_data::SlotData; -use crate::{context_deserialize, IndexedAttestation}; -use crate::{test_utils::TestRandom, Hash256, Slot}; use crate::{ Checkpoint, ContextDeserialize, ForkName, IndexedAttestationBase, IndexedAttestationElectra, }; +use crate::{Hash256, Slot, test_utils::TestRandom}; +use crate::{IndexedAttestation, context_deserialize}; use derivative::Derivative; use serde::{Deserialize, Deserializer, Serialize}; use ssz_derive::{Decode, Encode}; diff --git a/consensus/types/src/attester_slashing.rs b/consensus/types/src/attester_slashing.rs index 82611b6c7b6..adc3695f4a4 100644 --- a/consensus/types/src/attester_slashing.rs +++ b/consensus/types/src/attester_slashing.rs @@ -2,8 +2,8 @@ use crate::context_deserialize; use crate::indexed_attestation::{ IndexedAttestationBase, IndexedAttestationElectra, IndexedAttestationRef, }; -use crate::{test_utils::TestRandom, EthSpec}; use crate::{ContextDeserialize, ForkName}; +use crate::{EthSpec, test_utils::TestRandom}; use derivative::Derivative; use rand::{Rng, RngCore}; use serde::{Deserialize, Deserializer, Serialize}; @@ -171,7 +171,7 @@ impl AttesterSlashing { impl TestRandom for AttesterSlashing { fn random_for_test(rng: &mut impl RngCore) -> Self { - if rng.gen_bool(0.5) { + if rng.random_bool(0.5) { AttesterSlashing::Base(AttesterSlashingBase::random_for_test(rng)) } else { AttesterSlashing::Electra(AttesterSlashingElectra::random_for_test(rng)) diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 6a2bb88d04b..297bd38d8a4 100644 --- 
a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -827,7 +827,7 @@ impl fmt::Display for BlockImportSource { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{test_ssz_tree_hash_pair_with, SeedableRng, XorShiftRng}; + use crate::test_utils::{SeedableRng, XorShiftRng, test_ssz_tree_hash_pair_with}; use ssz::Encode; type BeaconBlock = super::BeaconBlock; diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index dca9aa14c3c..5538e7c45c8 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -8,7 +8,7 @@ use ssz_derive::{Decode, Encode}; use std::marker::PhantomData; use superstruct::superstruct; use test_random_derive::TestRandom; -use tree_hash::{TreeHash, BYTES_PER_CHUNK}; +use tree_hash::{BYTES_PER_CHUNK, TreeHash}; use tree_hash_derive::TreeHash; pub type KzgCommitments = diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 8bfbce87d3c..923168030f4 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -1,19 +1,19 @@ use self::committee_cache::get_active_validator_indices; -use crate::historical_summary::HistoricalSummary; -use crate::test_utils::TestRandom; use crate::ContextDeserialize; use crate::FixedBytesExtended; +use crate::historical_summary::HistoricalSummary; +use crate::test_utils::TestRandom; use crate::*; use compare_fields::CompareFields; use compare_fields_derive::CompareFields; use derivative::Derivative; use ethereum_hashing::hash; use int_to_bytes::{int_to_bytes4, int_to_bytes8}; -use metastruct::{metastruct, NumFields}; +use metastruct::{NumFields, metastruct}; pub use pubkey_cache::PubkeyCache; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Deserializer, Serialize}; -use ssz::{ssz_encode, Decode, DecodeError, Encode}; +use ssz::{Decode, DecodeError, Encode, ssz_encode}; use ssz_derive::{Decode, Encode}; use 
std::hash::Hash; use std::{fmt, mem, sync::Arc}; @@ -24,8 +24,8 @@ use tree_hash::TreeHash; use tree_hash_derive::TreeHash; pub use self::committee_cache::{ - compute_committee_index_in_epoch, compute_committee_range_in_epoch, epoch_committee_count, - CommitteeCache, + CommitteeCache, compute_committee_index_in_epoch, compute_committee_range_in_epoch, + epoch_committee_count, }; pub use crate::beacon_state::balance::Balance; pub use crate::beacon_state::exit_cache::ExitCache; @@ -33,7 +33,7 @@ pub use crate::beacon_state::progressive_balances_cache::*; pub use crate::beacon_state::slashings_cache::SlashingsCache; pub use eth_spec::*; pub use iter::BlockRootsIter; -pub use milhouse::{interface::Interface, List, Vector}; +pub use milhouse::{List, Vector, interface::Interface}; use tracing::instrument; #[macro_use] @@ -2511,19 +2511,17 @@ impl BeaconState { } // Use sync committees from `base` if they are equal. - if let Ok(current_sync_committee) = self.current_sync_committee_mut() { - if let Ok(base_sync_committee) = base.current_sync_committee() { - if current_sync_committee == base_sync_committee { - *current_sync_committee = base_sync_committee.clone(); - } - } + if let Ok(current_sync_committee) = self.current_sync_committee_mut() + && let Ok(base_sync_committee) = base.current_sync_committee() + && current_sync_committee == base_sync_committee + { + *current_sync_committee = base_sync_committee.clone(); } - if let Ok(next_sync_committee) = self.next_sync_committee_mut() { - if let Ok(base_sync_committee) = base.next_sync_committee() { - if next_sync_committee == base_sync_committee { - *next_sync_committee = base_sync_committee.clone(); - } - } + if let Ok(next_sync_committee) = self.next_sync_committee_mut() + && let Ok(base_sync_committee) = base.next_sync_committee() + && next_sync_committee == base_sync_committee + { + *next_sync_committee = base_sync_committee.clone(); } // Rebase caches like the committee caches and the pubkey cache, which are expensive 
to diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs index 513e538526b..06242e8d20e 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/beacon_state/committee_cache.rs @@ -5,7 +5,7 @@ use core::num::NonZeroUsize; use derivative::Derivative; use safe_arith::SafeArith; use serde::{Deserialize, Serialize}; -use ssz::{four_byte_option_impl, Decode, DecodeError, Encode}; +use ssz::{Decode, DecodeError, Encode, four_byte_option_impl}; use ssz_derive::{Decode, Encode}; use std::ops::Range; use std::sync::Arc; diff --git a/consensus/types/src/beacon_state/progressive_balances_cache.rs b/consensus/types/src/beacon_state/progressive_balances_cache.rs index 019bf1c3d35..67d1155dbf1 100644 --- a/consensus/types/src/beacon_state/progressive_balances_cache.rs +++ b/consensus/types/src/beacon_state/progressive_balances_cache.rs @@ -1,10 +1,10 @@ use crate::beacon_state::balance::Balance; use crate::{ + BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ParticipationFlags, consts::altair::{ NUM_FLAG_INDICES, TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX, }, - BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ParticipationFlags, }; #[cfg(feature = "arbitrary")] use arbitrary::Arbitrary; diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs index bfa7bb86d24..e5b05a4a5bd 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/src/beacon_state/tests.rs @@ -2,9 +2,9 @@ use crate::test_utils::*; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use beacon_chain::types::{ - test_utils::TestRandom, BeaconState, BeaconStateAltair, BeaconStateBase, BeaconStateError, - ChainSpec, Domain, Epoch, EthSpec, FixedBytesExtended, Hash256, Keypair, MainnetEthSpec, - MinimalEthSpec, RelativeEpoch, Slot, Vector, + BeaconState, BeaconStateAltair, BeaconStateBase, 
BeaconStateError, ChainSpec, Domain, Epoch, + EthSpec, FixedBytesExtended, Hash256, Keypair, MainnetEthSpec, MinimalEthSpec, RelativeEpoch, + Slot, Vector, test_utils::TestRandom, }; use ssz::Encode; use std::ops::Mul; diff --git a/consensus/types/src/blob_sidecar.rs b/consensus/types/src/blob_sidecar.rs index dbe4360901f..d65ad9a3e02 100644 --- a/consensus/types/src/blob_sidecar.rs +++ b/consensus/types/src/blob_sidecar.rs @@ -1,15 +1,15 @@ use crate::context_deserialize; use crate::test_utils::TestRandom; use crate::{ - beacon_block_body::BLOB_KZG_COMMITMENTS_INDEX, AbstractExecPayload, BeaconBlockHeader, - BeaconStateError, Blob, ChainSpec, Epoch, EthSpec, FixedVector, ForkName, Hash256, KzgProofs, - RuntimeFixedVector, RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, - VariableList, + AbstractExecPayload, BeaconBlockHeader, BeaconStateError, Blob, ChainSpec, Epoch, EthSpec, + FixedVector, ForkName, Hash256, KzgProofs, RuntimeFixedVector, RuntimeVariableList, + SignedBeaconBlock, SignedBeaconBlockHeader, Slot, VariableList, + beacon_block_body::BLOB_KZG_COMMITMENTS_INDEX, }; use bls::Signature; use derivative::Derivative; -use kzg::{Blob as KzgBlob, Kzg, KzgCommitment, KzgProof, BYTES_PER_BLOB, BYTES_PER_FIELD_ELEMENT}; -use merkle_proof::{merkle_root_from_branch, verify_merkle_proof, MerkleTreeError}; +use kzg::{BYTES_PER_BLOB, BYTES_PER_FIELD_ELEMENT, Blob as KzgBlob, Kzg, KzgCommitment, KzgProof}; +use merkle_proof::{MerkleTreeError, merkle_root_from_branch, verify_merkle_proof}; use rand::Rng; use safe_arith::ArithError; use serde::{Deserialize, Serialize}; diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder_bid.rs index f8d61a0da5b..ee157d50138 100644 --- a/consensus/types/src/builder_bid.rs +++ b/consensus/types/src/builder_bid.rs @@ -1,10 +1,9 @@ use crate::beacon_block_body::KzgCommitments; use crate::{ - test_utils::TestRandom, ChainSpec, ContextDeserialize, EthSpec, - ExecutionPayloadHeaderBellatrix, 
ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, - ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, ExecutionPayloadHeaderRef, - ExecutionPayloadHeaderRefMut, ExecutionRequests, ForkName, ForkVersionDecode, SignedRoot, - Uint256, + ChainSpec, ContextDeserialize, EthSpec, ExecutionPayloadHeaderBellatrix, + ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, + ExecutionPayloadHeaderFulu, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, + ExecutionRequests, ForkName, ForkVersionDecode, SignedRoot, Uint256, test_utils::TestRandom, }; use bls::PublicKeyBytes; use bls::Signature; @@ -87,7 +86,7 @@ impl ForkVersionDecode for BuilderBid { ForkName::Altair | ForkName::Base => { return Err(ssz::DecodeError::BytesInvalid(format!( "unsupported fork for ExecutionPayloadHeader: {fork_name}", - ))) + ))); } ForkName::Bellatrix => { BuilderBid::Bellatrix(BuilderBidBellatrix::from_ssz_bytes(bytes)?) diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 2c35e694266..659bd1d23f3 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -1,4 +1,4 @@ -use crate::application_domain::{ApplicationDomain, APPLICATION_DOMAIN_BUILDER}; +use crate::application_domain::{APPLICATION_DOMAIN_BUILDER, ApplicationDomain}; use crate::blob_sidecar::BlobIdentifier; use crate::data_column_sidecar::DataColumnsByRootIdentifier; use crate::*; @@ -2054,10 +2054,10 @@ where D: Deserializer<'de>, { let decoded: Option> = serde::de::Deserialize::deserialize(deserializer)?; - if let Some(fork_epoch) = decoded { - if fork_epoch.value != Epoch::max_value() { - return Ok(Some(fork_epoch)); - } + if let Some(fork_epoch) = decoded + && fork_epoch.value != Epoch::max_value() + { + return Ok(Some(fork_epoch)); } Ok(None) } diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index cf5cff8ea67..2ad9fae504d 100644 --- 
a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -1,6 +1,6 @@ use crate::{ - consts::altair, consts::deneb, AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, - ChainSpec, Config, DenebPreset, ElectraPreset, EthSpec, FuluPreset, + AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, ChainSpec, Config, DenebPreset, + ElectraPreset, EthSpec, FuluPreset, consts::altair, consts::deneb, }; use maplit::hashmap; use serde::{Deserialize, Serialize}; diff --git a/consensus/types/src/consolidation_request.rs b/consensus/types/src/consolidation_request.rs index 87098beaee1..2af3426b68f 100644 --- a/consensus/types/src/consolidation_request.rs +++ b/consensus/types/src/consolidation_request.rs @@ -1,5 +1,5 @@ use crate::context_deserialize; -use crate::{test_utils::TestRandom, Address, ForkName, PublicKeyBytes, SignedRoot}; +use crate::{Address, ForkName, PublicKeyBytes, SignedRoot, test_utils::TestRandom}; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; diff --git a/consensus/types/src/data_column_sidecar.rs b/consensus/types/src/data_column_sidecar.rs index 14019563311..192540c596c 100644 --- a/consensus/types/src/data_column_sidecar.rs +++ b/consensus/types/src/data_column_sidecar.rs @@ -1,4 +1,4 @@ -use crate::beacon_block_body::{KzgCommitments, BLOB_KZG_COMMITMENTS_INDEX}; +use crate::beacon_block_body::{BLOB_KZG_COMMITMENTS_INDEX, KzgCommitments}; use crate::context_deserialize; use crate::test_utils::TestRandom; use crate::{ diff --git a/consensus/types/src/data_column_subnet_id.rs b/consensus/types/src/data_column_subnet_id.rs index 3c3a1310e47..125a77fc1e5 100644 --- a/consensus/types/src/data_column_subnet_id.rs +++ b/consensus/types/src/data_column_subnet_id.rs @@ -1,6 +1,6 @@ //! Identifies each data column subnet by an integer identifier. 
-use crate::data_column_sidecar::ColumnIndex; use crate::ChainSpec; +use crate::data_column_sidecar::ColumnIndex; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use std::fmt::{self, Display}; diff --git a/consensus/types/src/deposit_tree_snapshot.rs b/consensus/types/src/deposit_tree_snapshot.rs index 2f9df8758b5..400fca217da 100644 --- a/consensus/types/src/deposit_tree_snapshot.rs +++ b/consensus/types/src/deposit_tree_snapshot.rs @@ -1,5 +1,5 @@ use crate::*; -use ethereum_hashing::{hash32_concat, ZERO_HASHES}; +use ethereum_hashing::{ZERO_HASHES, hash32_concat}; use int_to_bytes::int_to_bytes32; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; diff --git a/consensus/types/src/enr_fork_id.rs b/consensus/types/src/enr_fork_id.rs index 40718380a5c..e22672aeb60 100644 --- a/consensus/types/src/enr_fork_id.rs +++ b/consensus/types/src/enr_fork_id.rs @@ -1,5 +1,5 @@ -use crate::test_utils::TestRandom; use crate::Epoch; +use crate::test_utils::TestRandom; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; diff --git a/consensus/types/src/eth1_data.rs b/consensus/types/src/eth1_data.rs index 42de3ed806e..800f3e25f94 100644 --- a/consensus/types/src/eth1_data.rs +++ b/consensus/types/src/eth1_data.rs @@ -1,7 +1,7 @@ use super::Hash256; +use crate::ForkName; use crate::context_deserialize; use crate::test_utils::TestRandom; -use crate::ForkName; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 40006caf1ef..2dc45159980 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -3,9 +3,9 @@ use crate::*; use safe_arith::SafeArith; use serde::{Deserialize, Serialize}; use ssz_types::typenum::{ - bit::B0, UInt, U0, U1, U10, U1024, U1048576, U1073741824, U1099511627776, U128, U131072, - U134217728, U16, U16777216, U17, U2, U2048, U256, 
U262144, U32, U33554432, U4, U4096, U512, - U625, U64, U65536, U8, U8192, + U0, U1, U2, U4, U8, U10, U16, U17, U32, U64, U128, U256, U512, U625, U1024, U2048, U4096, + U8192, U65536, U131072, U262144, U1048576, U16777216, U33554432, U134217728, U1073741824, + U1099511627776, UInt, bit::B0, }; use std::fmt::{self, Debug}; use std::str::FromStr; diff --git a/consensus/types/src/execution_block_hash.rs b/consensus/types/src/execution_block_hash.rs index c1223a64f08..d3065afbbb0 100644 --- a/consensus/types/src/execution_block_hash.rs +++ b/consensus/types/src/execution_block_hash.rs @@ -1,6 +1,6 @@ -use crate::test_utils::TestRandom; use crate::FixedBytesExtended; use crate::Hash256; +use crate::test_utils::TestRandom; use derivative::Derivative; use rand::RngCore; use serde::{Deserialize, Serialize}; diff --git a/consensus/types/src/execution_block_header.rs b/consensus/types/src/execution_block_header.rs index 60f2960afbe..02152adbf73 100644 --- a/consensus/types/src/execution_block_header.rs +++ b/consensus/types/src/execution_block_header.rs @@ -17,7 +17,7 @@ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
-use crate::{Address, EthSpec, ExecutionPayloadRef, Hash256, Hash64, Uint256}; +use crate::{Address, EthSpec, ExecutionPayloadRef, Hash64, Hash256, Uint256}; use alloy_rlp::RlpEncodable; use metastruct::metastruct; diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 17e3a49496e..be9b114eddd 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -152,7 +152,7 @@ impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for ExecutionPayload return Err(serde::de::Error::custom(format!( "ExecutionPayload failed to deserialize: unsupported fork '{}'", context - ))) + ))); } ForkName::Bellatrix => { Self::Bellatrix(Deserialize::deserialize(deserializer).map_err(convert_err)?) diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index 9abc6e9e32c..6127d63046a 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -494,7 +494,7 @@ impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for ExecutionPayloadHead return Err(serde::de::Error::custom(format!( "ExecutionPayloadHeader failed to deserialize: unsupported fork '{}'", context - ))) + ))); } ForkName::Bellatrix => { Self::Bellatrix(Deserialize::deserialize(deserializer).map_err(convert_err)?) 
diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs index aeb14934f49..66617326e13 100644 --- a/consensus/types/src/fork_context.rs +++ b/consensus/types/src/fork_context.rs @@ -152,8 +152,8 @@ impl ForkContext { #[cfg(test)] mod tests { use super::*; - use crate::chain_spec::{BlobParameters, BlobSchedule}; use crate::MainnetEthSpec; + use crate::chain_spec::{BlobParameters, BlobSchedule}; type E = MainnetEthSpec; @@ -238,9 +238,11 @@ mod tests { ); let invalid_digest = [9, 9, 9, 9]; - assert!(context - .get_fork_from_context_bytes(invalid_digest) - .is_none()); + assert!( + context + .get_fork_from_context_bytes(invalid_digest) + .is_none() + ); } #[test] diff --git a/consensus/types/src/graffiti.rs b/consensus/types/src/graffiti.rs index ae9fff50925..31cc4187a67 100644 --- a/consensus/types/src/graffiti.rs +++ b/consensus/types/src/graffiti.rs @@ -1,9 +1,9 @@ use crate::{ - test_utils::{RngCore, TestRandom}, Hash256, + test_utils::{RngCore, TestRandom}, }; use regex::bytes::Regex; -use serde::{de::Error, Deserialize, Deserializer, Serialize, Serializer}; +use serde::{Deserialize, Deserializer, Serialize, Serializer, de::Error}; use ssz::{Decode, DecodeError, Encode}; use std::fmt; use std::str::FromStr; diff --git a/consensus/types/src/indexed_attestation.rs b/consensus/types/src/indexed_attestation.rs index 4526b165c81..4ba695b9d51 100644 --- a/consensus/types/src/indexed_attestation.rs +++ b/consensus/types/src/indexed_attestation.rs @@ -1,6 +1,6 @@ use crate::context_deserialize; use crate::{ - test_utils::TestRandom, AggregateSignature, AttestationData, EthSpec, ForkName, VariableList, + AggregateSignature, AttestationData, EthSpec, ForkName, VariableList, test_utils::TestRandom, }; use core::slice::Iter; use derivative::Derivative; @@ -207,9 +207,9 @@ impl Hash for IndexedAttestation { #[cfg(test)] mod tests { use super::*; + use crate::MainnetEthSpec; use crate::slot_epoch::Epoch; use crate::test_utils::{SeedableRng, 
XorShiftRng}; - use crate::MainnetEthSpec; #[test] pub fn test_is_double_vote_true() { diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 7425367f109..0d56f01188e 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -155,15 +155,15 @@ pub use crate::data_column_sidecar::{ ColumnIndex, DataColumnSidecar, DataColumnSidecarList, DataColumnsByRootIdentifier, }; pub use crate::data_column_subnet_id::DataColumnSubnetId; -pub use crate::deposit::{Deposit, DEPOSIT_TREE_DEPTH}; +pub use crate::deposit::{DEPOSIT_TREE_DEPTH, Deposit}; pub use crate::deposit_data::DepositData; pub use crate::deposit_message::DepositMessage; pub use crate::deposit_request::DepositRequest; pub use crate::deposit_tree_snapshot::{DepositTreeSnapshot, FinalizedExecutionBlock}; pub use crate::enr_fork_id::EnrForkId; pub use crate::epoch_cache::{EpochCache, EpochCacheError, EpochCacheKey}; -pub use crate::eth1_data::Eth1Data; pub use crate::eth_spec::EthSpecId; +pub use crate::eth1_data::Eth1Data; pub use crate::execution_block_hash::ExecutionBlockHash; pub use crate::execution_block_header::{EncodableExecutionBlockHeader, ExecutionBlockHeader}; pub use crate::execution_payload::{ @@ -181,7 +181,7 @@ pub use crate::fork::Fork; pub use crate::fork_context::ForkContext; pub use crate::fork_data::ForkData; pub use crate::fork_name::{ForkName, InconsistentFork}; -pub use crate::graffiti::{Graffiti, GRAFFITI_BYTES_LEN}; +pub use crate::graffiti::{GRAFFITI_BYTES_LEN, Graffiti}; pub use crate::historical_batch::HistoricalBatch; pub use crate::indexed_attestation::{ IndexedAttestation, IndexedAttestationBase, IndexedAttestationElectra, IndexedAttestationRef, @@ -235,10 +235,10 @@ pub use crate::signed_aggregate_and_proof::{ SignedAggregateAndProof, SignedAggregateAndProofBase, SignedAggregateAndProofElectra, }; pub use crate::signed_beacon_block::{ - ssz_tagged_signed_beacon_block, ssz_tagged_signed_beacon_block_arc, SignedBeaconBlock, - SignedBeaconBlockAltair, 
SignedBeaconBlockBase, SignedBeaconBlockBellatrix, + SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, SignedBeaconBlockFulu, SignedBeaconBlockHash, SignedBlindedBeaconBlock, + ssz_tagged_signed_beacon_block, ssz_tagged_signed_beacon_block_arc, }; pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; pub use crate::signed_bls_to_execution_change::SignedBlsToExecutionChange; @@ -287,8 +287,8 @@ pub use bls::{ AggregatePublicKey, AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey, Signature, SignatureBytes, }; -pub use context_deserialize::{context_deserialize, ContextDeserialize}; +pub use context_deserialize::{ContextDeserialize, context_deserialize}; pub use kzg::{KzgCommitment, KzgProof, VERSIONED_HASH_VERSION_KZG}; pub use milhouse::{self, List, Vector}; -pub use ssz_types::{typenum, typenum::Unsigned, BitList, BitVector, FixedVector, VariableList}; +pub use ssz_types::{BitList, BitVector, FixedVector, VariableList, typenum, typenum::Unsigned}; pub use superstruct::superstruct; diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index 7e170365b2c..88558254e80 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -1,9 +1,9 @@ use crate::context_deserialize; use crate::{ - light_client_update::*, test_utils::TestRandom, BeaconState, ChainSpec, ContextDeserialize, - EthSpec, FixedVector, ForkName, Hash256, LightClientHeader, LightClientHeaderAltair, - LightClientHeaderCapella, LightClientHeaderDeneb, LightClientHeaderElectra, - LightClientHeaderFulu, SignedBlindedBeaconBlock, Slot, SyncCommittee, + BeaconState, ChainSpec, ContextDeserialize, EthSpec, FixedVector, ForkName, Hash256, + LightClientHeader, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, + LightClientHeaderElectra, 
LightClientHeaderFulu, SignedBlindedBeaconBlock, Slot, SyncCommittee, + light_client_update::*, test_utils::TestRandom, }; use derivative::Derivative; use serde::{Deserialize, Deserializer, Serialize}; @@ -110,7 +110,7 @@ impl LightClientBootstrap { ForkName::Base => { return Err(ssz::DecodeError::BytesInvalid(format!( "LightClientBootstrap decoding for {fork_name} not implemented" - ))) + ))); } }; @@ -235,7 +235,7 @@ impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for LightClientBootstrap return Err(serde::de::Error::custom(format!( "LightClientBootstrap failed to deserialize: unsupported fork '{}'", context - ))) + ))); } ForkName::Altair | ForkName::Bellatrix => { Self::Altair(Deserialize::deserialize(deserializer).map_err(convert_err)?) diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index 0f572a856fc..df5954d4968 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -1,10 +1,10 @@ use super::{EthSpec, FixedVector, Hash256, LightClientHeader, Slot, SyncAggregate}; -use crate::context_deserialize; use crate::ChainSpec; +use crate::context_deserialize; use crate::{ - light_client_update::*, test_utils::TestRandom, ContextDeserialize, ForkName, - LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, - LightClientHeaderElectra, LightClientHeaderFulu, SignedBlindedBeaconBlock, + ContextDeserialize, ForkName, LightClientHeaderAltair, LightClientHeaderCapella, + LightClientHeaderDeneb, LightClientHeaderElectra, LightClientHeaderFulu, + SignedBlindedBeaconBlock, light_client_update::*, test_utils::TestRandom, }; use derivative::Derivative; use serde::{Deserialize, Deserializer, Serialize}; @@ -217,7 +217,7 @@ impl LightClientFinalityUpdate { ForkName::Base => { return Err(ssz::DecodeError::BytesInvalid(format!( "LightClientFinalityUpdate decoding for {fork_name} not implemented" - ))) + ))); } }; @@ 
-270,7 +270,7 @@ impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for LightClientFinalityU return Err(serde::de::Error::custom(format!( "LightClientFinalityUpdate failed to deserialize: unsupported fork '{}'", context - ))) + ))); } ForkName::Altair | ForkName::Bellatrix => { Self::Altair(Deserialize::deserialize(deserializer).map_err(convert_err)?) diff --git a/consensus/types/src/light_client_header.rs b/consensus/types/src/light_client_header.rs index c36a1c2111d..676c1642e63 100644 --- a/consensus/types/src/light_client_header.rs +++ b/consensus/types/src/light_client_header.rs @@ -1,13 +1,13 @@ -use crate::context_deserialize; use crate::ChainSpec; -use crate::{light_client_update::*, BeaconBlockBody}; +use crate::context_deserialize; +use crate::{BeaconBlockBody, light_client_update::*}; +use crate::{BeaconBlockHeader, ExecutionPayloadHeader}; +use crate::{ContextDeserialize, ForkName}; use crate::{ - test_utils::TestRandom, EthSpec, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, + EthSpec, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, FixedVector, Hash256, - SignedBlindedBeaconBlock, + SignedBlindedBeaconBlock, test_utils::TestRandom, }; -use crate::{BeaconBlockHeader, ExecutionPayloadHeader}; -use crate::{ContextDeserialize, ForkName}; use derivative::Derivative; use serde::{Deserialize, Deserializer, Serialize}; use ssz::Decode; @@ -128,7 +128,7 @@ impl LightClientHeader { ForkName::Base => { return Err(ssz::DecodeError::BytesInvalid(format!( "LightClientHeader decoding for {fork_name} not implemented" - ))) + ))); } }; @@ -356,7 +356,7 @@ impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for LightClientHeader return Err(serde::de::Error::custom(format!( "LightClientFinalityUpdate failed to deserialize: unsupported fork '{}'", context - ))) + ))); } ForkName::Altair | ForkName::Bellatrix => { 
Self::Altair(Deserialize::deserialize(deserializer).map_err(convert_err)?) diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client_optimistic_update.rs index 1bff0df0614..48e5e46ffe1 100644 --- a/consensus/types/src/light_client_optimistic_update.rs +++ b/consensus/types/src/light_client_optimistic_update.rs @@ -2,9 +2,9 @@ use super::{ContextDeserialize, EthSpec, ForkName, LightClientHeader, Slot, Sync use crate::context_deserialize; use crate::test_utils::TestRandom; use crate::{ - light_client_update::*, ChainSpec, LightClientHeaderAltair, LightClientHeaderCapella, - LightClientHeaderDeneb, LightClientHeaderElectra, LightClientHeaderFulu, - SignedBlindedBeaconBlock, + ChainSpec, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, + LightClientHeaderElectra, LightClientHeaderFulu, SignedBlindedBeaconBlock, + light_client_update::*, }; use derivative::Derivative; use serde::{Deserialize, Deserializer, Serialize}; @@ -177,7 +177,7 @@ impl LightClientOptimisticUpdate { ForkName::Base => { return Err(ssz::DecodeError::BytesInvalid(format!( "LightClientOptimisticUpdate decoding for {fork_name} not implemented" - ))) + ))); } }; @@ -229,7 +229,7 @@ impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for LightClientOptimisti return Err(serde::de::Error::custom(format!( "LightClientOptimisticUpdate failed to deserialize: unsupported fork '{}'", context - ))) + ))); } ForkName::Altair | ForkName::Bellatrix => { Self::Altair(Deserialize::deserialize(deserializer).map_err(convert_err)?) 
diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index 87976dbedb5..8a413f7e14c 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -1,11 +1,11 @@ use super::{EthSpec, FixedVector, Hash256, Slot, SyncAggregate, SyncCommittee}; +use crate::LightClientHeader; use crate::context_deserialize; use crate::light_client_header::LightClientHeaderElectra; -use crate::LightClientHeader; use crate::{ - beacon_state, test_utils::TestRandom, ChainSpec, ContextDeserialize, Epoch, ForkName, - LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, - LightClientHeaderFulu, SignedBlindedBeaconBlock, + ChainSpec, ContextDeserialize, Epoch, ForkName, LightClientHeaderAltair, + LightClientHeaderCapella, LightClientHeaderDeneb, LightClientHeaderFulu, + SignedBlindedBeaconBlock, beacon_state, test_utils::TestRandom, }; use derivative::Derivative; use safe_arith::ArithError; @@ -199,7 +199,7 @@ impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for LightClientUpdate return Err(serde::de::Error::custom(format!( "LightClientUpdate failed to deserialize: unsupported fork '{}'", context - ))) + ))); } ForkName::Altair | ForkName::Bellatrix => { Self::Altair(Deserialize::deserialize(deserializer).map_err(convert_err)?) 
@@ -376,7 +376,7 @@ impl LightClientUpdate { ForkName::Base => { return Err(ssz::DecodeError::BytesInvalid(format!( "LightClientUpdate decoding for {fork_name} not implemented" - ))) + ))); } }; diff --git a/consensus/types/src/participation_flags.rs b/consensus/types/src/participation_flags.rs index 3e29ca83e80..e59efc51704 100644 --- a/consensus/types/src/participation_flags.rs +++ b/consensus/types/src/participation_flags.rs @@ -1,4 +1,4 @@ -use crate::{consts::altair::NUM_FLAG_INDICES, test_utils::TestRandom, Hash256}; +use crate::{Hash256, consts::altair::NUM_FLAG_INDICES, test_utils::TestRandom}; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 1f7edfcacaa..b2866ecfd1f 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -305,18 +305,10 @@ impl ExecPayload for FullPayload { fn withdrawals_root(&self) -> Result { match self { FullPayload::Bellatrix(_) => Err(Error::IncorrectStateVariant), - FullPayload::Capella(ref inner) => { - Ok(inner.execution_payload.withdrawals.tree_hash_root()) - } - FullPayload::Deneb(ref inner) => { - Ok(inner.execution_payload.withdrawals.tree_hash_root()) - } - FullPayload::Electra(ref inner) => { - Ok(inner.execution_payload.withdrawals.tree_hash_root()) - } - FullPayload::Fulu(ref inner) => { - Ok(inner.execution_payload.withdrawals.tree_hash_root()) - } + FullPayload::Capella(inner) => Ok(inner.execution_payload.withdrawals.tree_hash_root()), + FullPayload::Deneb(inner) => Ok(inner.execution_payload.withdrawals.tree_hash_root()), + FullPayload::Electra(inner) => Ok(inner.execution_payload.withdrawals.tree_hash_root()), + FullPayload::Fulu(inner) => Ok(inner.execution_payload.withdrawals.tree_hash_root()), } } @@ -325,9 +317,9 @@ impl ExecPayload for FullPayload { FullPayload::Bellatrix(_) | FullPayload::Capella(_) => { 
Err(Error::IncorrectStateVariant) } - FullPayload::Deneb(ref inner) => Ok(inner.execution_payload.blob_gas_used), - FullPayload::Electra(ref inner) => Ok(inner.execution_payload.blob_gas_used), - FullPayload::Fulu(ref inner) => Ok(inner.execution_payload.blob_gas_used), + FullPayload::Deneb(inner) => Ok(inner.execution_payload.blob_gas_used), + FullPayload::Electra(inner) => Ok(inner.execution_payload.blob_gas_used), + FullPayload::Fulu(inner) => Ok(inner.execution_payload.blob_gas_used), } } @@ -650,14 +642,10 @@ impl ExecPayload for BlindedPayload { fn withdrawals_root(&self) -> Result { match self { BlindedPayload::Bellatrix(_) => Err(Error::IncorrectStateVariant), - BlindedPayload::Capella(ref inner) => { - Ok(inner.execution_payload_header.withdrawals_root) - } - BlindedPayload::Deneb(ref inner) => Ok(inner.execution_payload_header.withdrawals_root), - BlindedPayload::Electra(ref inner) => { - Ok(inner.execution_payload_header.withdrawals_root) - } - BlindedPayload::Fulu(ref inner) => Ok(inner.execution_payload_header.withdrawals_root), + BlindedPayload::Capella(inner) => Ok(inner.execution_payload_header.withdrawals_root), + BlindedPayload::Deneb(inner) => Ok(inner.execution_payload_header.withdrawals_root), + BlindedPayload::Electra(inner) => Ok(inner.execution_payload_header.withdrawals_root), + BlindedPayload::Fulu(inner) => Ok(inner.execution_payload_header.withdrawals_root), } } @@ -666,9 +654,9 @@ impl ExecPayload for BlindedPayload { BlindedPayload::Bellatrix(_) | BlindedPayload::Capella(_) => { Err(Error::IncorrectStateVariant) } - BlindedPayload::Deneb(ref inner) => Ok(inner.execution_payload_header.blob_gas_used), - BlindedPayload::Electra(ref inner) => Ok(inner.execution_payload_header.blob_gas_used), - BlindedPayload::Fulu(ref inner) => Ok(inner.execution_payload_header.blob_gas_used), + BlindedPayload::Deneb(inner) => Ok(inner.execution_payload_header.blob_gas_used), + BlindedPayload::Electra(inner) => 
Ok(inner.execution_payload_header.blob_gas_used), + BlindedPayload::Fulu(inner) => Ok(inner.execution_payload_header.blob_gas_used), } } diff --git a/consensus/types/src/pending_consolidation.rs b/consensus/types/src/pending_consolidation.rs index 4072c155645..9fb8c3566db 100644 --- a/consensus/types/src/pending_consolidation.rs +++ b/consensus/types/src/pending_consolidation.rs @@ -1,6 +1,6 @@ +use crate::ForkName; use crate::context_deserialize; use crate::test_utils::TestRandom; -use crate::ForkName; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 4a0a8c6ead0..a2a52e17c6a 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -1,4 +1,4 @@ -use crate::beacon_block_body::{format_kzg_commitments, BLOB_KZG_COMMITMENTS_INDEX}; +use crate::beacon_block_body::{BLOB_KZG_COMMITMENTS_INDEX, format_kzg_commitments}; use crate::test_utils::TestRandom; use crate::*; use derivative::Derivative; diff --git a/consensus/types/src/signed_beacon_block_header.rs b/consensus/types/src/signed_beacon_block_header.rs index 77ca96b2a72..4a5ff2ec1a4 100644 --- a/consensus/types/src/signed_beacon_block_header.rs +++ b/consensus/types/src/signed_beacon_block_header.rs @@ -1,7 +1,7 @@ use crate::context_deserialize; use crate::{ - test_utils::TestRandom, BeaconBlockHeader, ChainSpec, Domain, EthSpec, Fork, ForkName, Hash256, - PublicKey, Signature, SignedRoot, + BeaconBlockHeader, ChainSpec, Domain, EthSpec, Fork, ForkName, Hash256, PublicKey, Signature, + SignedRoot, test_utils::TestRandom, }; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; diff --git a/consensus/types/src/signed_voluntary_exit.rs b/consensus/types/src/signed_voluntary_exit.rs index 02213ed3117..0beffa1e04a 100644 --- a/consensus/types/src/signed_voluntary_exit.rs +++ 
b/consensus/types/src/signed_voluntary_exit.rs @@ -1,5 +1,5 @@ use crate::context_deserialize; -use crate::{test_utils::TestRandom, ForkName, VoluntaryExit}; +use crate::{ForkName, VoluntaryExit, test_utils::TestRandom}; use bls::Signature; use serde::{Deserialize, Serialize}; diff --git a/consensus/types/src/sqlite.rs b/consensus/types/src/sqlite.rs index 2f3f6d1c806..b6318dc4ce5 100644 --- a/consensus/types/src/sqlite.rs +++ b/consensus/types/src/sqlite.rs @@ -1,8 +1,8 @@ //! Implementations of SQLite compatibility traits. use crate::{Epoch, Slot}; use rusqlite::{ - types::{FromSql, FromSqlError, ToSql, ToSqlOutput, ValueRef}, Error, + types::{FromSql, FromSqlError, ToSql, ToSqlOutput, ValueRef}, }; macro_rules! impl_to_from_sql { diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index 2a5d183a507..6ec8ca4a27f 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -1,7 +1,7 @@ //! Identifies each shard by an integer identifier. 
use crate::SingleAttestation; use crate::{AttestationRef, ChainSpec, CommitteeIndex, EthSpec, Slot}; -use alloy_primitives::{bytes::Buf, U256}; +use alloy_primitives::{U256, bytes::Buf}; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use std::ops::{Deref, DerefMut}; diff --git a/consensus/types/src/sync_committee_contribution.rs b/consensus/types/src/sync_committee_contribution.rs index bad7797e301..db22a3bdbc8 100644 --- a/consensus/types/src/sync_committee_contribution.rs +++ b/consensus/types/src/sync_committee_contribution.rs @@ -1,7 +1,7 @@ use super::{AggregateSignature, EthSpec, ForkName, SignedRoot}; use crate::context_deserialize; use crate::slot_data::SlotData; -use crate::{test_utils::TestRandom, BitVector, Hash256, Slot, SyncCommitteeMessage}; +use crate::{BitVector, Hash256, Slot, SyncCommitteeMessage, test_utils::TestRandom}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; diff --git a/consensus/types/src/sync_subnet_id.rs b/consensus/types/src/sync_subnet_id.rs index 1ce7d0c13f3..3d0d853fcaa 100644 --- a/consensus/types/src/sync_subnet_id.rs +++ b/consensus/types/src/sync_subnet_id.rs @@ -1,6 +1,6 @@ //! Identifies each sync committee subnet by an integer identifier. 
-use crate::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use crate::EthSpec; +use crate::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz_types::typenum::Unsigned; diff --git a/consensus/types/src/test_utils/generate_random_block_and_blobs.rs b/consensus/types/src/test_utils/generate_random_block_and_blobs.rs index cf240c3f1f0..0f52e485a8a 100644 --- a/consensus/types/src/test_utils/generate_random_block_and_blobs.rs +++ b/consensus/types/src/test_utils/generate_random_block_and_blobs.rs @@ -72,12 +72,12 @@ pub fn generate_blobs(n_blobs: usize) -> Result, Stri #[cfg(test)] mod test { use super::*; - use rand::thread_rng; + use rand::rng; #[test] fn test_verify_blob_inclusion_proof() { let (_block, blobs) = - generate_rand_block_and_blobs::(ForkName::Deneb, 6, &mut thread_rng()); + generate_rand_block_and_blobs::(ForkName::Deneb, 6, &mut rng()); for blob in blobs { assert!(blob.verify_blob_sidecar_inclusion_proof()); } @@ -86,7 +86,7 @@ mod test { #[test] fn test_verify_blob_inclusion_proof_from_existing_proof() { let (block, mut blob_sidecars) = - generate_rand_block_and_blobs::(ForkName::Deneb, 1, &mut thread_rng()); + generate_rand_block_and_blobs::(ForkName::Deneb, 1, &mut rng()); let BlobSidecar { index, blob, @@ -115,10 +115,10 @@ mod test { #[test] fn test_verify_blob_inclusion_proof_invalid() { let (_block, blobs) = - generate_rand_block_and_blobs::(ForkName::Deneb, 6, &mut thread_rng()); + generate_rand_block_and_blobs::(ForkName::Deneb, 6, &mut rng()); for mut blob in blobs { - blob.kzg_commitment_inclusion_proof = FixedVector::random_for_test(&mut thread_rng()); + blob.kzg_commitment_inclusion_proof = FixedVector::random_for_test(&mut rng()); assert!(!blob.verify_blob_sidecar_inclusion_proof()); } } diff --git a/consensus/types/src/test_utils/macros.rs b/consensus/types/src/test_utils/macros.rs index 4fd7720689d..662527f5a4e 100644 --- 
a/consensus/types/src/test_utils/macros.rs +++ b/consensus/types/src/test_utils/macros.rs @@ -13,7 +13,7 @@ macro_rules! ssz_tests { ($type: ty) => { #[test] pub fn test_ssz_round_trip() { - use ssz::{ssz_encode, Decode}; + use ssz::{Decode, ssz_encode}; use $crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; let mut rng = XorShiftRng::from_seed([42; 16]); diff --git a/consensus/types/src/test_utils/mod.rs b/consensus/types/src/test_utils/mod.rs index 9599bcd3641..37d58d43420 100644 --- a/consensus/types/src/test_utils/mod.rs +++ b/consensus/types/src/test_utils/mod.rs @@ -8,8 +8,8 @@ pub use rand_xorshift::XorShiftRng; pub use generate_deterministic_keypairs::generate_deterministic_keypair; pub use generate_deterministic_keypairs::generate_deterministic_keypairs; pub use generate_deterministic_keypairs::load_keypairs_from_yaml; -use ssz::{ssz_encode, Decode, Encode}; -pub use test_random::{test_random_instance, TestRandom}; +use ssz::{Decode, Encode, ssz_encode}; +pub use test_random::{TestRandom, test_random_instance}; use tree_hash::TreeHash; #[macro_use] diff --git a/consensus/types/src/test_utils/test_random.rs b/consensus/types/src/test_utils/test_random.rs index 00355779d2d..98bb8565dd6 100644 --- a/consensus/types/src/test_utils/test_random.rs +++ b/consensus/types/src/test_utils/test_random.rs @@ -2,7 +2,7 @@ use crate::*; use rand::RngCore; use rand::SeedableRng; use rand_xorshift::XorShiftRng; -use smallvec::{smallvec, SmallVec}; +use smallvec::{SmallVec, smallvec}; use std::marker::PhantomData; use std::sync::Arc; diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index dc97c8821b4..dec8bba627f 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -1,7 +1,7 @@ use crate::context_deserialize; use crate::{ - test_utils::TestRandom, Address, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, - FixedBytesExtended, ForkName, Hash256, PublicKeyBytes, + Address, BeaconState, ChainSpec, 
Checkpoint, Epoch, EthSpec, FixedBytesExtended, ForkName, + Hash256, PublicKeyBytes, test_utils::TestRandom, }; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; diff --git a/consensus/types/src/voluntary_exit.rs b/consensus/types/src/voluntary_exit.rs index 60900350385..42d792a814d 100644 --- a/consensus/types/src/voluntary_exit.rs +++ b/consensus/types/src/voluntary_exit.rs @@ -1,7 +1,7 @@ use crate::context_deserialize; use crate::{ - test_utils::TestRandom, ChainSpec, Domain, Epoch, ForkName, Hash256, SecretKey, SignedRoot, - SignedVoluntaryExit, + ChainSpec, Domain, Epoch, ForkName, Hash256, SecretKey, SignedRoot, SignedVoluntaryExit, + test_utils::TestRandom, }; use serde::{Deserialize, Serialize}; diff --git a/crypto/bls/src/generic_aggregate_public_key.rs b/crypto/bls/src/generic_aggregate_public_key.rs index 426e165fb7e..aea23ca63c5 100644 --- a/crypto/bls/src/generic_aggregate_public_key.rs +++ b/crypto/bls/src/generic_aggregate_public_key.rs @@ -1,6 +1,6 @@ use crate::{ - generic_public_key::{GenericPublicKey, TPublicKey}, Error, + generic_public_key::{GenericPublicKey, TPublicKey}, }; use std::fmt::{self, Debug}; use std::marker::PhantomData; diff --git a/crypto/bls/src/generic_aggregate_signature.rs b/crypto/bls/src/generic_aggregate_signature.rs index e6e53253f64..98a634ee11f 100644 --- a/crypto/bls/src/generic_aggregate_signature.rs +++ b/crypto/bls/src/generic_aggregate_signature.rs @@ -1,8 +1,8 @@ use crate::{ + Error, Hash256, INFINITY_SIGNATURE, SIGNATURE_BYTES_LEN, generic_aggregate_public_key::TAggregatePublicKey, generic_public_key::{GenericPublicKey, TPublicKey}, generic_signature::{GenericSignature, TSignature}, - Error, Hash256, INFINITY_SIGNATURE, SIGNATURE_BYTES_LEN, }; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; diff --git a/crypto/bls/src/generic_public_key.rs b/crypto/bls/src/generic_public_key.rs index 80b42dfa714..122a47c15dd 100644 --- 
a/crypto/bls/src/generic_public_key.rs +++ b/crypto/bls/src/generic_public_key.rs @@ -1,5 +1,5 @@ -use crate::generic_public_key_bytes::GenericPublicKeyBytes; use crate::Error; +use crate::generic_public_key_bytes::GenericPublicKeyBytes; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use serde_utils::hex::encode as hex_encode; diff --git a/crypto/bls/src/generic_public_key_bytes.rs b/crypto/bls/src/generic_public_key_bytes.rs index 985bff745c6..6df4f3b0b07 100644 --- a/crypto/bls/src/generic_public_key_bytes.rs +++ b/crypto/bls/src/generic_public_key_bytes.rs @@ -1,6 +1,6 @@ use crate::{ - generic_public_key::{GenericPublicKey, TPublicKey}, Error, PUBLIC_KEY_BYTES_LEN, + generic_public_key::{GenericPublicKey, TPublicKey}, }; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; diff --git a/crypto/bls/src/generic_secret_key.rs b/crypto/bls/src/generic_secret_key.rs index 62bfc1467db..813693ee0ac 100644 --- a/crypto/bls/src/generic_secret_key.rs +++ b/crypto/bls/src/generic_secret_key.rs @@ -1,7 +1,7 @@ use crate::{ + Error, Hash256, ZeroizeHash, generic_public_key::{GenericPublicKey, TPublicKey}, generic_signature::{GenericSignature, TSignature}, - Error, Hash256, ZeroizeHash, }; use std::marker::PhantomData; diff --git a/crypto/bls/src/generic_signature.rs b/crypto/bls/src/generic_signature.rs index 0b375d3edd5..e59efa3b3e5 100644 --- a/crypto/bls/src/generic_signature.rs +++ b/crypto/bls/src/generic_signature.rs @@ -1,6 +1,6 @@ use crate::{ - generic_public_key::{GenericPublicKey, TPublicKey}, Error, Hash256, + generic_public_key::{GenericPublicKey, TPublicKey}, }; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; diff --git a/crypto/bls/src/generic_signature_bytes.rs b/crypto/bls/src/generic_signature_bytes.rs index b291adb7357..b6d0a7d8b0d 100644 --- a/crypto/bls/src/generic_signature_bytes.rs +++ b/crypto/bls/src/generic_signature_bytes.rs @@ -1,7 +1,7 @@ 
use crate::{ + Error, INFINITY_SIGNATURE, SIGNATURE_BYTES_LEN, generic_public_key::TPublicKey, generic_signature::{GenericSignature, TSignature}, - Error, INFINITY_SIGNATURE, SIGNATURE_BYTES_LEN, }; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; diff --git a/crypto/bls/src/generic_signature_set.rs b/crypto/bls/src/generic_signature_set.rs index a64db7adef4..bfcf1492014 100644 --- a/crypto/bls/src/generic_signature_set.rs +++ b/crypto/bls/src/generic_signature_set.rs @@ -1,9 +1,9 @@ use crate::{ + Hash256, generic_aggregate_public_key::TAggregatePublicKey, generic_aggregate_signature::{GenericAggregateSignature, TAggregateSignature}, generic_public_key::{GenericPublicKey, TPublicKey}, generic_signature::{GenericSignature, TSignature}, - Hash256, }; use std::borrow::Cow; use std::marker::PhantomData; diff --git a/crypto/bls/src/impls/blst.rs b/crypto/bls/src/impls/blst.rs index 6ca0fe09b2d..c1ed2c7177e 100644 --- a/crypto/bls/src/impls/blst.rs +++ b/crypto/bls/src/impls/blst.rs @@ -1,29 +1,28 @@ use crate::{ + BlstError, Error, Hash256, INFINITY_SIGNATURE, ZeroizeHash, generic_aggregate_public_key::TAggregatePublicKey, generic_aggregate_signature::TAggregateSignature, generic_public_key::{ - GenericPublicKey, TPublicKey, PUBLIC_KEY_BYTES_LEN, PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN, + GenericPublicKey, PUBLIC_KEY_BYTES_LEN, PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN, TPublicKey, }, generic_secret_key::TSecretKey, - generic_signature::{TSignature, SIGNATURE_BYTES_LEN, SIGNATURE_UNCOMPRESSED_BYTES_LEN}, - BlstError, Error, Hash256, ZeroizeHash, INFINITY_SIGNATURE, + generic_signature::{SIGNATURE_BYTES_LEN, SIGNATURE_UNCOMPRESSED_BYTES_LEN, TSignature}, }; pub use blst::min_pk as blst_core; -use blst::{blst_scalar, BLST_ERROR}; +use blst::{BLST_ERROR, blst_scalar}; use rand::Rng; - pub const DST: &[u8] = b"BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_"; pub const RAND_BITS: usize = 64; /// Provides the externally-facing, core BLS types. 
pub mod types { + pub use super::BlstAggregatePublicKey as AggregatePublicKey; + pub use super::BlstAggregateSignature as AggregateSignature; + pub use super::SignatureSet; pub use super::blst_core::PublicKey; pub use super::blst_core::SecretKey; pub use super::blst_core::Signature; pub use super::verify_signature_sets; - pub use super::BlstAggregatePublicKey as AggregatePublicKey; - pub use super::BlstAggregateSignature as AggregateSignature; - pub use super::SignatureSet; } pub type SignatureSet<'a> = crate::generic_signature_set::GenericSignatureSet< @@ -43,7 +42,7 @@ pub fn verify_signature_sets<'a>( return false; } - let rng = &mut rand::thread_rng(); + let rng = &mut rand::rng(); let mut rands: Vec = Vec::with_capacity(sets.len()); let mut msgs_refs = Vec::with_capacity(sets.len()); @@ -55,7 +54,7 @@ pub fn verify_signature_sets<'a>( let mut vals = [0u64; 4]; while vals[0] == 0 { // Do not use zero - vals[0] = rng.gen(); + vals[0] = rng.random(); } let mut rand_i = std::mem::MaybeUninit::::uninit(); @@ -284,8 +283,8 @@ impl TAggregateSignature for blst_core::SecretKey { fn random() -> Self { - let rng = &mut rand::thread_rng(); - let ikm: [u8; 32] = rng.gen(); + let rng = &mut rand::rng(); + let ikm: [u8; 32] = rng.random(); Self::key_gen(&ikm, &[]).unwrap() } diff --git a/crypto/bls/src/impls/fake_crypto.rs b/crypto/bls/src/impls/fake_crypto.rs index 7273697597b..e7eee050775 100644 --- a/crypto/bls/src/impls/fake_crypto.rs +++ b/crypto/bls/src/impls/fake_crypto.rs @@ -1,23 +1,23 @@ use crate::{ + Error, Hash256, INFINITY_PUBLIC_KEY, INFINITY_SIGNATURE, ZeroizeHash, generic_aggregate_public_key::TAggregatePublicKey, generic_aggregate_signature::TAggregateSignature, generic_public_key::{ - GenericPublicKey, TPublicKey, PUBLIC_KEY_BYTES_LEN, PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN, + GenericPublicKey, PUBLIC_KEY_BYTES_LEN, PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN, TPublicKey, }, - generic_secret_key::{TSecretKey, SECRET_KEY_BYTES_LEN}, - generic_signature::{TSignature, 
SIGNATURE_BYTES_LEN, SIGNATURE_UNCOMPRESSED_BYTES_LEN}, - Error, Hash256, ZeroizeHash, INFINITY_PUBLIC_KEY, INFINITY_SIGNATURE, + generic_secret_key::{SECRET_KEY_BYTES_LEN, TSecretKey}, + generic_signature::{SIGNATURE_BYTES_LEN, SIGNATURE_UNCOMPRESSED_BYTES_LEN, TSignature}, }; /// Provides the externally-facing, core BLS types. pub mod types { - pub use super::verify_signature_sets; pub use super::AggregatePublicKey; pub use super::AggregateSignature; pub use super::PublicKey; pub use super::SecretKey; pub use super::Signature; pub use super::SignatureSet; + pub use super::verify_signature_sets; } pub type SignatureSet<'a> = crate::generic_signature_set::GenericSignatureSet< diff --git a/crypto/bls/src/lib.rs b/crypto/bls/src/lib.rs index ac2d83b2041..433eaef4f2a 100644 --- a/crypto/bls/src/lib.rs +++ b/crypto/bls/src/lib.rs @@ -94,7 +94,7 @@ macro_rules! define_mod { use crate::generics::*; - pub use bls_variant::{verify_signature_sets, SignatureSet}; + pub use bls_variant::{SignatureSet, verify_signature_sets}; pub type PublicKey = GenericPublicKey; pub type PublicKeyBytes = GenericPublicKeyBytes; diff --git a/crypto/eth2_key_derivation/src/derived_key.rs b/crypto/eth2_key_derivation/src/derived_key.rs index 21f98796d43..bdcb689bfc6 100644 --- a/crypto/eth2_key_derivation/src/derived_key.rs +++ b/crypto/eth2_key_derivation/src/derived_key.rs @@ -1,6 +1,6 @@ -use crate::{lamport_secret_key::LamportSecretKey, secret_bytes::SecretBytes, ZeroizeHash}; +use crate::{ZeroizeHash, lamport_secret_key::LamportSecretKey, secret_bytes::SecretBytes}; use num_bigint_dig::BigUint; -use ring::hkdf::{KeyType, Prk, Salt, HKDF_SHA256}; +use ring::hkdf::{HKDF_SHA256, KeyType, Prk, Salt}; use sha2::{Digest, Sha256}; use zeroize::Zeroize; @@ -333,8 +333,7 @@ mod test { fn get_raw_vector() -> RawTestVector { RawTestVector { seed: "0xc55257c360c07c72029aebc1b53c05ed0362ada38ead3e3e9efa3708e53495531f09a6987599d18264c1e1c92f2cf141630c7a3c4ab7c81b2f001698e7463b04", - master_sk: - 
"6083874454709270928345386274498605044986640685124978867557563392430687146096", + master_sk: "6083874454709270928345386274498605044986640685124978867557563392430687146096", child_index: 0, lamport_0: vec![ "0xe345d0ad7be270737de05cf036f688f385d5f99c7fddb054837658bdd2ebd519", @@ -850,10 +849,8 @@ mod test { "0x8b28838382e6892f59c42a7709d6d38396495d3af5a8d5b0a60f172a6a8940bd", "0x261a605fa5f2a9bdc7cffac530edcf976e7ea7af4e443b625fe01ed39dad44b6", ], - compressed_lamport_pk: - "0xdd635d27d1d52b9a49df9e5c0c622360a4dd17cba7db4e89bce3cb048fb721a5", - child_sk: - "20397789859736650942317412262472558107875392172444076792671091975210932703118", + compressed_lamport_pk: "0xdd635d27d1d52b9a49df9e5c0c622360a4dd17cba7db4e89bce3cb048fb721a5", + child_sk: "20397789859736650942317412262472558107875392172444076792671091975210932703118", } } } diff --git a/crypto/eth2_keystore/src/keystore.rs b/crypto/eth2_keystore/src/keystore.rs index 16a979cf63a..b31e32eb4a8 100644 --- a/crypto/eth2_keystore/src/keystore.rs +++ b/crypto/eth2_keystore/src/keystore.rs @@ -1,23 +1,24 @@ //! Provides a JSON keystore for a BLS keypair, as specified by //! [EIP-2335](https://eips.ethereum.org/EIPS/eip-2335). 
+use crate::Uuid; use crate::derived_key::DerivedKey; use crate::json_keystore::{ Aes128Ctr, ChecksumModule, Cipher, CipherModule, Crypto, EmptyMap, EmptyString, JsonKeystore, Kdf, KdfModule, Scrypt, Sha256Checksum, Version, }; -use crate::Uuid; +use aes::Aes128Ctr as AesCtr; use aes::cipher::generic_array::GenericArray; use aes::cipher::{NewCipher, StreamCipher}; -use aes::Aes128Ctr as AesCtr; use bls::{Keypair, PublicKey, SecretKey, ZeroizeHash}; use eth2_key_derivation::PlainText; use hmac::Hmac; use pbkdf2::pbkdf2; use rand::prelude::*; use scrypt::{ + Params as ScryptParams, errors::{InvalidOutputLen, InvalidParams}, - scrypt, Params as ScryptParams, + scrypt, }; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; @@ -103,8 +104,8 @@ impl<'a> KeystoreBuilder<'a> { if password.is_empty() { Err(Error::EmptyPassword) } else { - let salt = rand::thread_rng().gen::<[u8; SALT_SIZE]>(); - let iv = rand::thread_rng().gen::<[u8; IV_SIZE]>().to_vec().into(); + let salt = rand::rng().random::<[u8; SALT_SIZE]>(); + let iv = rand::rng().random::<[u8; IV_SIZE]>().to_vec().into(); Ok(Self { keypair, @@ -574,7 +575,10 @@ fn validate_parameters(kdf: &Kdf) -> Result<(), Error> { let default_kdf = Scrypt::default_scrypt(vec![0u8; 32]); let default_npr = 128 * default_kdf.n * default_kdf.p * default_kdf.r; if npr < default_npr { - eprintln!("WARN: Scrypt parameters are too weak (n: {}, p: {}, r: {}), we recommend (n: {}, p: {}, r: {})", params.n, params.p, params.r, default_kdf.n, default_kdf.p, default_kdf.r); + eprintln!( + "WARN: Scrypt parameters are too weak (n: {}, p: {}, r: {}), we recommend (n: {}, p: {}, r: {})", + params.n, params.p, params.r, default_kdf.n, default_kdf.p, default_kdf.r + ); } // Validate `salt` length. 
diff --git a/crypto/eth2_keystore/src/lib.rs b/crypto/eth2_keystore/src/lib.rs index afa5e75de3e..e1740c0a41a 100644 --- a/crypto/eth2_keystore/src/lib.rs +++ b/crypto/eth2_keystore/src/lib.rs @@ -9,7 +9,7 @@ pub mod json_keystore; pub use bls::ZeroizeHash; pub use eth2_key_derivation::PlainText; pub use keystore::{ - decrypt, default_kdf, encrypt, keypair_from_secret, Error, Keystore, KeystoreBuilder, DKLEN, - HASH_SIZE, IV_SIZE, SALT_SIZE, + DKLEN, Error, HASH_SIZE, IV_SIZE, Keystore, KeystoreBuilder, SALT_SIZE, decrypt, default_kdf, + encrypt, keypair_from_secret, }; pub use uuid::Uuid; diff --git a/crypto/eth2_keystore/tests/tests.rs b/crypto/eth2_keystore/tests/tests.rs index 20bf9f1653d..6849adbbdde 100644 --- a/crypto/eth2_keystore/tests/tests.rs +++ b/crypto/eth2_keystore/tests/tests.rs @@ -3,9 +3,8 @@ use bls::Keypair; use eth2_keystore::{ - default_kdf, + DKLEN, Error, Keystore, KeystoreBuilder, default_kdf, json_keystore::{Kdf, Pbkdf2, Prf, Scrypt}, - Error, Keystore, KeystoreBuilder, DKLEN, }; use std::fs::File; use tempfile::tempdir; diff --git a/crypto/eth2_wallet/src/lib.rs b/crypto/eth2_wallet/src/lib.rs index 492024d26e9..27b7e830b37 100644 --- a/crypto/eth2_wallet/src/lib.rs +++ b/crypto/eth2_wallet/src/lib.rs @@ -4,8 +4,8 @@ mod wallet; pub mod json_wallet; pub use bip39; -pub use validator_path::{KeyType, ValidatorPath, COIN_TYPE, PURPOSE}; +pub use validator_path::{COIN_TYPE, KeyType, PURPOSE, ValidatorPath}; pub use wallet::{ - recover_validator_secret, recover_validator_secret_from_mnemonic, DerivedKey, Error, - KeystoreError, PlainText, Uuid, ValidatorKeystores, Wallet, WalletBuilder, + DerivedKey, Error, KeystoreError, PlainText, Uuid, ValidatorKeystores, Wallet, WalletBuilder, + recover_validator_secret, recover_validator_secret_from_mnemonic, }; diff --git a/crypto/eth2_wallet/src/wallet.rs b/crypto/eth2_wallet/src/wallet.rs index 8bf70912167..bd9cb10ab23 100644 --- a/crypto/eth2_wallet/src/wallet.rs +++ b/crypto/eth2_wallet/src/wallet.rs 
@@ -1,17 +1,17 @@ use crate::{ + KeyType, ValidatorPath, json_wallet::{ Aes128Ctr, ChecksumModule, Cipher, CipherModule, Crypto, EmptyMap, EmptyString, JsonWallet, Kdf, KdfModule, Sha256Checksum, TypeField, Version, }, - KeyType, ValidatorPath, }; pub use bip39::{Mnemonic, Seed as Bip39Seed}; pub use eth2_key_derivation::{DerivedKey, DerivedKeyError}; +pub use eth2_keystore::{Error as KeystoreError, PlainText}; use eth2_keystore::{ - decrypt, default_kdf, encrypt, keypair_from_secret, Keystore, KeystoreBuilder, IV_SIZE, - SALT_SIZE, + IV_SIZE, Keystore, KeystoreBuilder, SALT_SIZE, decrypt, default_kdf, encrypt, + keypair_from_secret, }; -pub use eth2_keystore::{Error as KeystoreError, PlainText}; use rand::prelude::*; use serde::{Deserialize, Serialize}; use std::io::{Read, Write}; @@ -90,8 +90,8 @@ impl<'a> WalletBuilder<'a> { } else if seed.is_empty() { Err(Error::EmptySeed) } else { - let salt = rand::thread_rng().gen::<[u8; SALT_SIZE]>(); - let iv = rand::thread_rng().gen::<[u8; IV_SIZE]>().to_vec().into(); + let salt = rand::rng().random::<[u8; SALT_SIZE]>(); + let iv = rand::rng().random::<[u8; IV_SIZE]>().to_vec().into(); Ok(Self { seed: seed.to_vec().into(), diff --git a/crypto/eth2_wallet/tests/tests.rs b/crypto/eth2_wallet/tests/tests.rs index 3dc073f764d..812d33247ed 100644 --- a/crypto/eth2_wallet/tests/tests.rs +++ b/crypto/eth2_wallet/tests/tests.rs @@ -1,8 +1,9 @@ #![cfg(not(debug_assertions))] use eth2_wallet::{ + DerivedKey, Error, KeyType, KeystoreError, Wallet, WalletBuilder, bip39::{Language, Mnemonic, Seed}, - recover_validator_secret, DerivedKey, Error, KeyType, KeystoreError, Wallet, WalletBuilder, + recover_validator_secret, }; use std::fs::File; use tempfile::tempdir; diff --git a/database_manager/src/cli.rs b/database_manager/src/cli.rs index c62da1206f1..cb332546f94 100644 --- a/database_manager/src/cli.rs +++ b/database_manager/src/cli.rs @@ -1,6 +1,6 @@ pub use clap::{Arg, ArgAction, Args, Command, FromArgMatches, Parser}; -use 
clap_utils::get_color_style; use clap_utils::FLAG_HEADER; +use clap_utils::get_color_style; use serde::{Deserialize, Serialize}; use std::path::PathBuf; use store::hdiff::HierarchyConfig; diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index f45e4146b77..6bb7531493d 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -5,7 +5,7 @@ use crate::cli::PruneStates; use beacon_chain::{ builder::Witness, schema_change::migrate_schema, slot_clock::SystemTimeSlotClock, }; -use beacon_node::{get_data_dir, ClientConfig}; +use beacon_node::{ClientConfig, get_data_dir}; use clap::ArgMatches; use clap::ValueEnum; use cli::{Compact, Inspect}; @@ -16,10 +16,10 @@ use std::io::Write; use std::path::PathBuf; use store::KeyValueStore; use store::{ + DBColumn, HotColdDB, database::interface::BeaconNodeBackend, errors::Error, - metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION}, - DBColumn, HotColdDB, + metadata::{CURRENT_SCHEMA_VERSION, SchemaVersion}, }; use strum::{EnumString, EnumVariantNames}; use tracing::{info, warn}; diff --git a/lcli/src/block_root.rs b/lcli/src/block_root.rs index 3c07d4f9ef0..497ce1a4385 100644 --- a/lcli/src/block_root.rs +++ b/lcli/src/block_root.rs @@ -30,7 +30,7 @@ use crate::transition_blocks::load_from_ssz_with; use clap::ArgMatches; use clap_utils::{parse_optional, parse_required}; use environment::Environment; -use eth2::{types::BlockId, BeaconNodeHttpClient, SensitiveUrl, Timeouts}; +use eth2::{BeaconNodeHttpClient, SensitiveUrl, Timeouts, types::BlockId}; use eth2_network_config::Eth2NetworkConfig; use std::path::PathBuf; use std::time::{Duration, Instant}; diff --git a/lcli/src/check_deposit_data.rs b/lcli/src/check_deposit_data.rs index 47c2c7addf0..e4d985b5ebf 100644 --- a/lcli/src/check_deposit_data.rs +++ b/lcli/src/check_deposit_data.rs @@ -1,6 +1,6 @@ use clap::ArgMatches; use clap_utils::{parse_required, parse_ssz_required}; -use deposit_contract::{decode_eth1_tx_data, DEPOSIT_DATA_LEN}; +use 
deposit_contract::{DEPOSIT_DATA_LEN, decode_eth1_tx_data}; use tree_hash::TreeHash; pub fn run(matches: &ArgMatches) -> Result<(), String> { diff --git a/lcli/src/generate_bootnode_enr.rs b/lcli/src/generate_bootnode_enr.rs index b2fd7e7ec71..6fe13d17c33 100644 --- a/lcli/src/generate_bootnode_enr.rs +++ b/lcli/src/generate_bootnode_enr.rs @@ -1,8 +1,8 @@ use clap::ArgMatches; use lighthouse_network::{ - discovery::{build_enr, CombinedKey, CombinedKeyExt, ENR_FILENAME}, + NETWORK_KEY_FILENAME, NetworkConfig, + discovery::{CombinedKey, CombinedKeyExt, ENR_FILENAME, build_enr}, libp2p::identity::secp256k1, - NetworkConfig, NETWORK_KEY_FILENAME, }; use std::io::Write; use std::path::PathBuf; diff --git a/lcli/src/http_sync.rs b/lcli/src/http_sync.rs index cb6a9d2b1d9..2e36eadf235 100644 --- a/lcli/src/http_sync.rs +++ b/lcli/src/http_sync.rs @@ -2,8 +2,8 @@ use clap::ArgMatches; use clap_utils::{parse_optional, parse_required}; use environment::Environment; use eth2::{ - types::{BlockId, ChainSpec, ForkName, PublishBlockRequest, SignedBlockContents}, BeaconNodeHttpClient, Error, SensitiveUrl, Timeouts, + types::{BlockId, ChainSpec, ForkName, PublishBlockRequest, SignedBlockContents}, }; use eth2_network_config::Eth2NetworkConfig; use ssz::Encode; @@ -64,11 +64,11 @@ pub async fn run_async( next_block_id = BlockId::Root(block.parent_root()); blocks.push((block.slot(), publish_block_req)); - if let Some(ref common_ancestor_block) = maybe_common_ancestor_block { - if common_ancestor_block == &next_block_id { - println!("reached known common ancestor: {next_block_id:?}"); - break; - } + if let Some(ref common_ancestor_block) = maybe_common_ancestor_block + && common_ancestor_block == &next_block_id + { + println!("reached known common ancestor: {next_block_id:?}"); + break; } let block_exists_in_target = target @@ -86,12 +86,13 @@ pub async fn run_async( for (slot, block) in blocks.iter().rev() { println!("posting block at slot {slot}"); if let Err(e) = 
target.post_beacon_blocks(block).await { - if let Error::ServerMessage(ref e) = e { - if e.code == 202 { - println!("duplicate block detected while posting block at slot {slot}"); - continue; - } + if let Error::ServerMessage(ref e) = e + && e.code == 202 + { + println!("duplicate block detected while posting block at slot {slot}"); + continue; } + return Err(format!("error posting {slot}: {e:?}")); } else { println!("success"); diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 105100aeb16..fb471914dab 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -11,7 +11,7 @@ mod state_root; mod transition_blocks; use clap::{Arg, ArgAction, ArgMatches, Command}; -use clap_utils::{parse_optional, FLAG_HEADER}; +use clap_utils::{FLAG_HEADER, parse_optional}; use environment::{EnvironmentBuilder, LoggerConfig}; use eth2_network_config::Eth2NetworkConfig; use parse_ssz::run_parse_ssz; diff --git a/lcli/src/mnemonic_validators.rs b/lcli/src/mnemonic_validators.rs index 2653aee149a..cc1f0cc2c77 100644 --- a/lcli/src/mnemonic_validators.rs +++ b/lcli/src/mnemonic_validators.rs @@ -1,9 +1,9 @@ -use account_utils::eth2_keystore::{keypair_from_secret, Keystore, KeystoreBuilder}; +use account_utils::eth2_keystore::{Keystore, KeystoreBuilder, keypair_from_secret}; use account_utils::random_password; use clap::ArgMatches; use eth2_wallet::bip39::Seed; use eth2_wallet::bip39::{Language, Mnemonic}; -use eth2_wallet::{recover_validator_secret_from_mnemonic, KeyType}; +use eth2_wallet::{KeyType, recover_validator_secret_from_mnemonic}; use rayon::prelude::*; use std::fs; use std::path::PathBuf; diff --git a/lcli/src/mock_el.rs b/lcli/src/mock_el.rs index 2e2c27a2dbf..553cf505d74 100644 --- a/lcli/src/mock_el.rs +++ b/lcli/src/mock_el.rs @@ -4,7 +4,7 @@ use environment::Environment; use execution_layer::{ auth::JwtKey, test_utils::{ - Config, MockExecutionConfig, MockServer, DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK, + Config, DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK, 
MockExecutionConfig, MockServer, }, }; use std::net::Ipv4Addr; diff --git a/lcli/src/skip_slots.rs b/lcli/src/skip_slots.rs index 9456f345703..88332c1a850 100644 --- a/lcli/src/skip_slots.rs +++ b/lcli/src/skip_slots.rs @@ -48,11 +48,11 @@ use crate::transition_blocks::load_from_ssz_with; use clap::ArgMatches; use clap_utils::{parse_optional, parse_required}; use environment::Environment; -use eth2::{types::StateId, BeaconNodeHttpClient, SensitiveUrl, Timeouts}; +use eth2::{BeaconNodeHttpClient, SensitiveUrl, Timeouts, types::StateId}; use eth2_network_config::Eth2NetworkConfig; use ssz::Encode; -use state_processing::state_advance::{complete_state_advance, partial_state_advance}; use state_processing::AllCaches; +use state_processing::state_advance::{complete_state_advance, partial_state_advance}; use std::fs::File; use std::io::prelude::*; use std::path::PathBuf; diff --git a/lcli/src/state_root.rs b/lcli/src/state_root.rs index 7b10ab9362f..b4bbae36c8b 100644 --- a/lcli/src/state_root.rs +++ b/lcli/src/state_root.rs @@ -2,7 +2,7 @@ use crate::transition_blocks::load_from_ssz_with; use clap::ArgMatches; use clap_utils::{parse_optional, parse_required}; use environment::Environment; -use eth2::{types::StateId, BeaconNodeHttpClient, SensitiveUrl, Timeouts}; +use eth2::{BeaconNodeHttpClient, SensitiveUrl, Timeouts, types::StateId}; use eth2_network_config::Eth2NetworkConfig; use std::path::PathBuf; use std::time::{Duration, Instant}; diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index 2226105c341..69d3975d09b 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -68,15 +68,15 @@ use clap::ArgMatches; use clap_utils::{parse_optional, parse_required}; use environment::Environment; use eth2::{ - types::{BlockId, StateId}, BeaconNodeHttpClient, SensitiveUrl, Timeouts, + types::{BlockId, StateId}, }; use eth2_network_config::Eth2NetworkConfig; use ssz::Encode; use 
state_processing::state_advance::complete_state_advance; use state_processing::{ - block_signature_verifier::BlockSignatureVerifier, per_block_processing, AllCaches, - BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, + AllCaches, BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, + block_signature_verifier::BlockSignatureVerifier, per_block_processing, }; use std::borrow::Cow; use std::fs::File; @@ -184,7 +184,7 @@ pub fn run( return Err( "must supply *both* --pre-state-path and --block-path *or* only --beacon-url" .into(), - ) + ); } }; @@ -354,10 +354,9 @@ fn do_transition( let mut ctxt = if let Some(ctxt) = saved_ctxt { ctxt.clone() } else { - let ctxt = ConsensusContext::new(pre_state.slot()) + ConsensusContext::new(pre_state.slot()) .set_current_block_root(block_root) - .set_proposer_index(block.message().proposer_index()); - ctxt + .set_proposer_index(block.message().proposer_index()) }; if !config.no_signature_verification { diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index a66b7e128f4..13a5a7a8038 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -9,10 +9,10 @@ use eth2_config::Eth2Config; use eth2_network_config::Eth2NetworkConfig; -use futures::channel::mpsc::{channel, Receiver, Sender}; -use futures::{future, StreamExt}; -use logging::tracing_logging_layer::LoggingLayer; +use futures::channel::mpsc::{Receiver, Sender, channel}; +use futures::{StreamExt, future}; use logging::SSELoggingComponents; +use logging::tracing_logging_layer::LoggingLayer; use logroller::{Compression, LogRollerBuilder, Rotation, RotationSize}; use serde::{Deserialize, Serialize}; use std::path::PathBuf; @@ -27,7 +27,7 @@ use types::{EthSpec, GnosisEthSpec, MainnetEthSpec, MinimalEthSpec}; use { futures::Future, std::{pin::Pin, task::Context, task::Poll}, - tokio::signal::unix::{signal, Signal, SignalKind}, + tokio::signal::unix::{Signal, SignalKind, signal}, }; #[cfg(not(target_family 
= "unix"))] diff --git a/lighthouse/environment/src/tracing_common.rs b/lighthouse/environment/src/tracing_common.rs index d78eb0d85a4..5ba014f7596 100644 --- a/lighthouse/environment/src/tracing_common.rs +++ b/lighthouse/environment/src/tracing_common.rs @@ -2,7 +2,7 @@ use crate::{EnvironmentBuilder, LoggerConfig}; use clap::ArgMatches; use logging::Libp2pDiscv5TracingLayer; use logging::{ - create_libp2p_discv5_tracing_layer, tracing_logging_layer::LoggingLayer, SSELoggingComponents, + SSELoggingComponents, create_libp2p_discv5_tracing_layer, tracing_logging_layer::LoggingLayer, }; use std::process; diff --git a/lighthouse/environment/tests/environment_builder.rs b/lighthouse/environment/tests/environment_builder.rs index a98caf8df5b..71acafeca8c 100644 --- a/lighthouse/environment/tests/environment_builder.rs +++ b/lighthouse/environment/tests/environment_builder.rs @@ -1,7 +1,7 @@ #![cfg(test)] use environment::EnvironmentBuilder; -use eth2_network_config::{Eth2NetworkConfig, DEFAULT_HARDCODED_NETWORK}; +use eth2_network_config::{DEFAULT_HARDCODED_NETWORK, Eth2NetworkConfig}; use std::path::PathBuf; use types::{Config, MainnetEthSpec}; diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index cb96b4904f2..40ad9f26916 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -7,17 +7,17 @@ use clap::FromArgMatches; use clap::Subcommand; use clap::{Arg, ArgAction, ArgMatches, Command}; use clap_utils::{ - flags::DISABLE_MALLOC_TUNING_FLAG, get_color_style, get_eth2_network_config, FLAG_HEADER, + FLAG_HEADER, flags::DISABLE_MALLOC_TUNING_FLAG, get_color_style, get_eth2_network_config, }; use cli::LighthouseSubcommands; -use directory::{parse_path_or_default, DEFAULT_BEACON_NODE_DIR, DEFAULT_VALIDATOR_DIR}; +use directory::{DEFAULT_BEACON_NODE_DIR, DEFAULT_VALIDATOR_DIR, parse_path_or_default}; use environment::tracing_common; use environment::{EnvironmentBuilder, LoggerConfig}; -use eth2_network_config::{Eth2NetworkConfig, 
DEFAULT_HARDCODED_NETWORK, HARDCODED_NET_NAMES}; +use eth2_network_config::{DEFAULT_HARDCODED_NETWORK, Eth2NetworkConfig, HARDCODED_NET_NAMES}; use ethereum_hashing::have_sha_extensions; use futures::TryFutureExt; use lighthouse_version::VERSION; -use logging::{build_workspace_filter, crit, MetricsLayer}; +use logging::{MetricsLayer, build_workspace_filter, crit}; use malloc_utils::configure_memory_allocator; use opentelemetry::trace::TracerProvider; use opentelemetry_otlp::WithExportConfig; @@ -27,8 +27,8 @@ use std::path::PathBuf; use std::process::exit; use std::sync::LazyLock; use task_executor::ShutdownReason; -use tracing::{info, warn, Level}; -use tracing_subscriber::{filter::EnvFilter, layer::SubscriberExt, util::SubscriberInitExt, Layer}; +use tracing::{Level, info, warn}; +use tracing_subscriber::{Layer, filter::EnvFilter, layer::SubscriberExt, util::SubscriberInitExt}; use types::{EthSpec, EthSpecId}; use validator_client::ProductionValidatorClient; @@ -101,7 +101,12 @@ fn build_profile_name() -> String { fn main() { // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided. if std::env::var("RUST_BACKTRACE").is_err() { - std::env::set_var("RUST_BACKTRACE", "1"); + // `set_var` is marked unsafe because it is unsafe to use if there are multiple threads + // reading or writing from the environment. We are at the very beginning of execution and + // have not spun up any threads or the tokio runtime, so it is safe to use. + unsafe { + std::env::set_var("RUST_BACKTRACE", "1"); + } } // Parse the CLI parameters. @@ -466,15 +471,16 @@ fn main() { // Only apply this optimization for the beacon node. It's the only process with a substantial // memory footprint. 
let is_beacon_node = matches.subcommand_name() == Some("beacon_node"); - if is_beacon_node && !matches.get_flag(DISABLE_MALLOC_TUNING_FLAG) { - if let Err(e) = configure_memory_allocator() { - eprintln!( - "Unable to configure the memory allocator: {} \n\ + if is_beacon_node + && !matches.get_flag(DISABLE_MALLOC_TUNING_FLAG) + && let Err(e) = configure_memory_allocator() + { + eprintln!( + "Unable to configure the memory allocator: {} \n\ Try providing the --{} flag", - e, DISABLE_MALLOC_TUNING_FLAG - ); - exit(1) - } + e, DISABLE_MALLOC_TUNING_FLAG + ); + exit(1) } let result = get_eth2_network_config(&matches).and_then(|eth2_network_config| { diff --git a/lighthouse/tests/account_manager.rs b/lighthouse/tests/account_manager.rs index d53d042fa4e..0b945bcb2d4 100644 --- a/lighthouse/tests/account_manager.rs +++ b/lighthouse/tests/account_manager.rs @@ -1,30 +1,31 @@ use account_manager::{ + CMD as ACCOUNT_CMD, WALLETS_DIR_FLAG, validator::{ + CMD as VALIDATOR_CMD, create::*, import::{self, CMD as IMPORT_CMD}, modify::{ALL, CMD as MODIFY_CMD, DISABLE, ENABLE, PUBKEY_FLAG}, - CMD as VALIDATOR_CMD, }, wallet::{ + CMD as WALLET_CMD, create::{CMD as CREATE_CMD, *}, list::CMD as LIST_CMD, - CMD as WALLET_CMD, }, - CMD as ACCOUNT_CMD, WALLETS_DIR_FLAG, *, + *, }; use account_utils::{ + STDIN_INPUTS_FLAG, eth2_keystore::KeystoreBuilder, validator_definitions::{SigningDefinition, ValidatorDefinition, ValidatorDefinitions}, - STDIN_INPUTS_FLAG, }; -use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; +use slashing_protection::{SLASHING_PROTECTION_FILENAME, SlashingDatabase}; use std::env; use std::fs::{self, File}; use std::io::{BufRead, BufReader, Write}; use std::path::{Path, PathBuf}; use std::process::{Child, Command, Output, Stdio}; use std::str::from_utf8; -use tempfile::{tempdir, TempDir}; +use tempfile::{TempDir, tempdir}; use types::{Keypair, PublicKey}; use validator_dir::ValidatorDir; use zeroize::Zeroizing; diff --git 
a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 85db2a51c1b..38fd54d29dd 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1,11 +1,12 @@ use crate::exec::{CommandLineTestExec, CompletedTest}; use beacon_node::beacon_chain::chain_config::{ - DisallowedReOrgOffsets, DEFAULT_RE_ORG_CUTOFF_DENOMINATOR, DEFAULT_RE_ORG_HEAD_THRESHOLD, + DEFAULT_RE_ORG_CUTOFF_DENOMINATOR, DEFAULT_RE_ORG_HEAD_THRESHOLD, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_SYNC_TOLERANCE_EPOCHS, + DisallowedReOrgOffsets, }; use beacon_node::{ - beacon_chain::graffiti_calculator::GraffitiOrigin, - beacon_chain::store::config::DatabaseBackend as BeaconNodeBackend, ClientConfig as Config, + ClientConfig as Config, beacon_chain::graffiti_calculator::GraffitiOrigin, + beacon_chain::store::config::DatabaseBackend as BeaconNodeBackend, }; use beacon_processor::BeaconProcessorConfig; use lighthouse_network::PeerId; @@ -2818,10 +2819,12 @@ fn invalid_block_roots_default_holesky() { .run_with_zero_port() .with_config(|config| { assert_eq!(config.chain.invalid_block_roots.len(), 1); - assert!(config - .chain - .invalid_block_roots - .contains(&*INVALID_HOLESKY_BLOCK_ROOT)); + assert!( + config + .chain + .invalid_block_roots + .contains(&*INVALID_HOLESKY_BLOCK_ROOT) + ); }) } diff --git a/lighthouse/tests/boot_node.rs b/lighthouse/tests/boot_node.rs index b243cd6001e..bd1cd7574e4 100644 --- a/lighthouse/tests/boot_node.rs +++ b/lighthouse/tests/boot_node.rs @@ -3,8 +3,8 @@ use boot_node::config::BootNodeConfigSerialization; use crate::exec::{CommandLineTestExec, CompletedTest}; use clap::ArgMatches; use clap_utils::get_eth2_network_config; -use lighthouse_network::discovery::ENR_FILENAME; use lighthouse_network::Enr; +use lighthouse_network::discovery::ENR_FILENAME; use std::fs::File; use std::io::Write; use std::net::Ipv4Addr; diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 
7bda1868c85..913011ea3a2 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -1,4 +1,4 @@ -use beacon_node_fallback::{beacon_node_health::BeaconNodeSyncDistanceTiers, ApiTopic}; +use beacon_node_fallback::{ApiTopic, beacon_node_health::BeaconNodeSyncDistanceTiers}; use crate::exec::CommandLineTestExec; use bls::{Keypair, PublicKeyBytes}; diff --git a/lighthouse/tests/validator_manager.rs b/lighthouse/tests/validator_manager.rs index 5ee9b0263a9..99afa7b6824 100644 --- a/lighthouse/tests/validator_manager.rs +++ b/lighthouse/tests/validator_manager.rs @@ -5,7 +5,7 @@ use std::marker::PhantomData; use std::path::PathBuf; use std::process::{Command, Stdio}; use std::str::FromStr; -use tempfile::{tempdir, TempDir}; +use tempfile::{TempDir, tempdir}; use types::*; use validator_manager::{ create_validators::CreateConfig, @@ -367,7 +367,7 @@ pub fn validator_move_misc_flags_1() { dest_vc_url: SensitiveUrl::parse("http://localhost:2").unwrap(), dest_vc_token_path: PathBuf::from("./2.json"), validators: Validators::Specific(vec![ - PublicKeyBytes::from_str(EXAMPLE_PUBKEY_0).unwrap() + PublicKeyBytes::from_str(EXAMPLE_PUBKEY_0).unwrap(), ]), builder_proposals: Some(false), builder_boost_factor: None, @@ -399,7 +399,7 @@ pub fn validator_move_misc_flags_2() { dest_vc_url: SensitiveUrl::parse("http://localhost:2").unwrap(), dest_vc_token_path: PathBuf::from("./2.json"), validators: Validators::Specific(vec![ - PublicKeyBytes::from_str(EXAMPLE_PUBKEY_0).unwrap() + PublicKeyBytes::from_str(EXAMPLE_PUBKEY_0).unwrap(), ]), builder_proposals: Some(false), builder_boost_factor: Some(100), diff --git a/slasher/service/src/service.rs b/slasher/service/src/service.rs index 013ef03fc0e..c0e6a8a0cd8 100644 --- a/slasher/service/src/service.rs +++ b/slasher/service/src/service.rs @@ -1,25 +1,25 @@ use beacon_chain::{ - observed_operations::ObservationOutcome, BeaconChain, BeaconChainError, BeaconChainTypes, + BeaconChain, BeaconChainError, 
BeaconChainTypes, observed_operations::ObservationOutcome, }; use directory::size_of_dir; use lighthouse_network::PubsubMessage; use network::NetworkMessage; use slasher::{ - metrics::{self, SLASHER_DATABASE_SIZE, SLASHER_RUN_TIME}, Slasher, + metrics::{self, SLASHER_DATABASE_SIZE, SLASHER_RUN_TIME}, }; use slot_clock::SlotClock; use state_processing::{ + VerifyOperation, per_block_processing::errors::{ AttesterSlashingInvalid, BlockOperationError, ProposerSlashingInvalid, }, - VerifyOperation, }; -use std::sync::mpsc::{sync_channel, Receiver, SyncSender, TrySendError}; use std::sync::Arc; +use std::sync::mpsc::{Receiver, SyncSender, TrySendError, sync_channel}; use task_executor::TaskExecutor; use tokio::sync::mpsc::UnboundedSender; -use tokio::time::{interval_at, Duration, Instant}; +use tokio::time::{Duration, Instant, interval_at}; use tracing::{debug, error, info, trace, warn}; use types::{AttesterSlashing, Epoch, EthSpec, ProposerSlashing}; @@ -211,15 +211,14 @@ impl SlasherService { beacon_chain.import_attester_slashing(verified_slashing); // Publish to the network if broadcast is enabled. 
- if slasher.config().broadcast { - if let Err(e) = + if slasher.config().broadcast + && let Err(e) = Self::publish_attester_slashing(beacon_chain, network_sender, slashing) - { - debug!( - error = ?e, - "Unable to publish attester slashing" - ); - } + { + debug!( + error = ?e, + "Unable to publish attester slashing" + ); } } } @@ -260,15 +259,14 @@ impl SlasherService { }; beacon_chain.import_proposer_slashing(verified_slashing); - if slasher.config().broadcast { - if let Err(e) = + if slasher.config().broadcast + && let Err(e) = Self::publish_proposer_slashing(beacon_chain, network_sender, slashing) - { - debug!( - error = ?e, - "Unable to publish proposer slashing" - ); - } + { + debug!( + error = ?e, + "Unable to publish proposer slashing" + ); } } } diff --git a/slasher/src/array.rs b/slasher/src/array.rs index c61b9b54148..e375da4a712 100644 --- a/slasher/src/array.rs +++ b/slasher/src/array.rs @@ -6,7 +6,7 @@ use crate::{ use flate2::bufread::{ZlibDecoder, ZlibEncoder}; use serde::{Deserialize, Serialize}; use std::borrow::Borrow; -use std::collections::{btree_map::Entry, BTreeMap, HashSet}; +use std::collections::{BTreeMap, HashSet, btree_map::Entry}; use std::io::Read; use std::sync::Arc; use types::{AttesterSlashing, Epoch, EthSpec, IndexedAttestation}; diff --git a/slasher/src/attester_record.rs b/slasher/src/attester_record.rs index 1cd4ba7d4e0..67145193acc 100644 --- a/slasher/src/attester_record.rs +++ b/slasher/src/attester_record.rs @@ -1,9 +1,9 @@ -use crate::{database::IndexedAttestationId, Error}; +use crate::{Error, database::IndexedAttestationId}; use ssz_derive::{Decode, Encode}; use std::borrow::Cow; use std::sync::{ - atomic::{AtomicU64, Ordering}, Arc, + atomic::{AtomicU64, Ordering}, }; use tree_hash::TreeHash as _; use tree_hash_derive::TreeHash; diff --git a/slasher/src/database.rs b/slasher/src/database.rs index d5e0ed5d242..2df2849612e 100644 --- a/slasher/src/database.rs +++ b/slasher/src/database.rs @@ -4,8 +4,8 @@ mod mdbx_impl; mod 
redb_impl; use crate::{ - metrics, AttesterRecord, AttesterSlashingStatus, CompactAttesterRecord, Config, Database, - Error, ProposerSlashingStatus, + AttesterRecord, AttesterSlashingStatus, CompactAttesterRecord, Config, Database, Error, + ProposerSlashingStatus, metrics, }; use byteorder::{BigEndian, ByteOrder}; use interface::{Environment, OpenDatabases, RwTransaction}; diff --git a/slasher/src/database/mdbx_impl.rs b/slasher/src/database/mdbx_impl.rs index f973e96120f..ede7249f040 100644 --- a/slasher/src/database/mdbx_impl.rs +++ b/slasher/src/database/mdbx_impl.rs @@ -1,12 +1,12 @@ #![cfg(feature = "mdbx")] use crate::{ + Config, Error, config::MEGABYTE, database::{ interface::{Key, OpenDatabases, Value}, *, }, - Config, Error, }; use mdbx::{DatabaseFlags, Geometry, WriteFlags}; use std::borrow::Cow; diff --git a/slasher/src/database/redb_impl.rs b/slasher/src/database/redb_impl.rs index 8b6f54fee6a..4198e826455 100644 --- a/slasher/src/database/redb_impl.rs +++ b/slasher/src/database/redb_impl.rs @@ -1,11 +1,11 @@ #![cfg(feature = "redb")] use crate::{ + Config, Error, config::REDB_DATA_FILENAME, database::{ interface::{Key, OpenDatabases, Value}, *, }, - Config, Error, }; use derivative::Derivative; use redb::{ReadableTable, TableDefinition}; diff --git a/slasher/src/lib.rs b/slasher/src/lib.rs index d3a26337d6a..b41aa23f7fe 100644 --- a/slasher/src/lib.rs +++ b/slasher/src/lib.rs @@ -23,8 +23,8 @@ pub use attester_record::{AttesterRecord, CompactAttesterRecord, IndexedAttester pub use block_queue::BlockQueue; pub use config::{Config, DatabaseBackend, DatabaseBackendOverride}; pub use database::{ - interface::{Database, Environment, RwTransaction}, IndexedAttestationId, SlasherDB, + interface::{Database, Environment, RwTransaction}, }; pub use error::Error; diff --git a/slasher/src/migrate.rs b/slasher/src/migrate.rs index 674ab9c132d..ec552a19d0d 100644 --- a/slasher/src/migrate.rs +++ b/slasher/src/migrate.rs @@ -1,4 +1,4 @@ -use 
crate::{database::CURRENT_SCHEMA_VERSION, Error, SlasherDB}; +use crate::{Error, SlasherDB, database::CURRENT_SCHEMA_VERSION}; use types::EthSpec; impl SlasherDB { diff --git a/slasher/src/slasher.rs b/slasher/src/slasher.rs index 12f35e657ef..5d26c5a6da9 100644 --- a/slasher/src/slasher.rs +++ b/slasher/src/slasher.rs @@ -5,8 +5,8 @@ use crate::metrics::{ SLASHER_NUM_BLOCKS_PROCESSED, }; use crate::{ - array, AttestationBatch, AttestationQueue, AttesterRecord, BlockQueue, Config, Error, - IndexedAttestationId, ProposerSlashingStatus, RwTransaction, SimpleBatch, SlasherDB, + AttestationBatch, AttestationQueue, AttesterRecord, BlockQueue, Config, Error, + IndexedAttestationId, ProposerSlashingStatus, RwTransaction, SimpleBatch, SlasherDB, array, }; use parking_lot::Mutex; use std::collections::HashSet; diff --git a/slasher/src/test_utils.rs b/slasher/src/test_utils.rs index 8054c0ad59a..26338a019a2 100644 --- a/slasher/src/test_utils.rs +++ b/slasher/src/test_utils.rs @@ -1,11 +1,11 @@ use std::collections::HashSet; use std::sync::Arc; use types::{ - indexed_attestation::{IndexedAttestationBase, IndexedAttestationElectra}, AggregateSignature, AttestationData, AttesterSlashing, AttesterSlashingBase, AttesterSlashingElectra, BeaconBlockHeader, ChainSpec, Checkpoint, Epoch, EthSpec, FixedBytesExtended, Hash256, IndexedAttestation, MainnetEthSpec, Signature, SignedBeaconBlockHeader, Slot, + indexed_attestation::{IndexedAttestationBase, IndexedAttestationElectra}, }; pub type E = MainnetEthSpec; diff --git a/slasher/tests/attester_slashings.rs b/slasher/tests/attester_slashings.rs index 22c9cfc1288..9a8e1e27a4b 100644 --- a/slasher/tests/attester_slashings.rs +++ b/slasher/tests/attester_slashings.rs @@ -3,12 +3,12 @@ use maplit::hashset; use rayon::prelude::*; use slasher::{ + Config, Slasher, config::DEFAULT_CHUNK_SIZE, test_utils::{ - att_slashing, chain_spec, indexed_att, indexed_att_electra, - slashed_validators_from_slashings, E, + E, att_slashing, chain_spec, 
indexed_att, indexed_att_electra, + slashed_validators_from_slashings, }, - Config, Slasher, }; use std::collections::HashSet; use tempfile::tempdir; diff --git a/slasher/tests/backend.rs b/slasher/tests/backend.rs index fd1a6ae14f6..ca32b7bd6b1 100644 --- a/slasher/tests/backend.rs +++ b/slasher/tests/backend.rs @@ -1,6 +1,6 @@ #![cfg(feature = "lmdb")] -use slasher::{config::MDBX_DATA_FILENAME, Config, DatabaseBackend, DatabaseBackendOverride}; +use slasher::{Config, DatabaseBackend, DatabaseBackendOverride, config::MDBX_DATA_FILENAME}; use std::fs::File; use tempfile::tempdir; diff --git a/slasher/tests/proposer_slashings.rs b/slasher/tests/proposer_slashings.rs index ef525c6f3f9..4e363fbaa13 100644 --- a/slasher/tests/proposer_slashings.rs +++ b/slasher/tests/proposer_slashings.rs @@ -1,8 +1,8 @@ #![cfg(any(feature = "mdbx", feature = "lmdb", feature = "redb"))] use slasher::{ - test_utils::{block as test_block, chain_spec, E}, Config, Slasher, + test_utils::{E, block as test_block, chain_spec}, }; use tempfile::tempdir; use types::{Epoch, EthSpec}; @@ -56,10 +56,8 @@ fn block_pruning() { (config.history_length - 1) * slots_per_epoch as usize + 1 ); // Check epochs of all slashings are from within range. 
- assert!(proposer_slashings.iter().all(|slashing| slashing - .signed_header_1 - .message - .slot - .epoch(slots_per_epoch) - > current_epoch - config.history_length as u64)); + assert!(proposer_slashings.iter().all(|slashing| { + slashing.signed_header_1.message.slot.epoch(slots_per_epoch) + > current_epoch - config.history_length as u64 + })); } diff --git a/slasher/tests/random.rs b/slasher/tests/random.rs index 3270700d881..5d1b2c9a744 100644 --- a/slasher/tests/random.rs +++ b/slasher/tests/random.rs @@ -1,16 +1,16 @@ #![cfg(any(feature = "mdbx", feature = "lmdb", feature = "redb"))] -use rand::prelude::*; +use rand::{prelude::*, rng}; use slasher::{ + Config, Slasher, SlasherDB, test_utils::{ - block, chain_spec, indexed_att, slashed_validators_from_attestations, - slashed_validators_from_slashings, E, + E, block, chain_spec, indexed_att, slashed_validators_from_attestations, + slashed_validators_from_slashings, }, - Config, Slasher, SlasherDB, }; use std::cmp::max; use std::sync::Arc; -use tempfile::{tempdir, TempDir}; +use tempfile::{TempDir, tempdir}; use types::{Epoch, EthSpec}; #[derive(Debug)] @@ -49,11 +49,11 @@ fn random_test(seed: u64, mut db: SlasherDB, test_config: TestConfig) -> Slas let mut rng = StdRng::seed_from_u64(seed); let mut config = Config::new(db.get_config().database_path.clone()); - config.validator_chunk_size = 1 << rng.gen_range(1..4); + config.validator_chunk_size = 1 << rng.random_range(1..4); - let chunk_size_exponent = rng.gen_range(1..4); + let chunk_size_exponent = rng.random_range(1..4); config.chunk_size = 1 << chunk_size_exponent; - config.history_length = 1 << rng.gen_range(chunk_size_exponent..chunk_size_exponent + 3); + config.history_length = 1 << rng.random_range(chunk_size_exponent..chunk_size_exponent + 3); let config = Arc::new(config); db.update_config(config.clone()); @@ -62,13 +62,13 @@ fn random_test(seed: u64, mut db: SlasherDB, test_config: TestConfig) -> Slas let validators = (0..num_validators as 
u64).collect::>(); - let num_attestations = rng.gen_range(2..max_attestations + 1); + let num_attestations = rng.random_range(2..max_attestations + 1); let mut current_epoch = Epoch::new(0); let mut attestations = vec![]; for _ in 0..num_attestations { - let num_attesters = rng.gen_range(1..num_validators); + let num_attesters = rng.random_range(1..num_validators); let mut attesting_indices = validators .choose_multiple(&mut rng, num_attesters) .copied() @@ -77,20 +77,20 @@ fn random_test(seed: u64, mut db: SlasherDB, test_config: TestConfig) -> Slas // If checking slashings, generate valid attestations in range. let (source, target) = if check_slashings { - let source = rng.gen_range( + let source = rng.random_range( current_epoch .as_u64() .saturating_sub(config.history_length as u64 - 1) ..current_epoch.as_u64() + 1, ); - let target = rng.gen_range(source..current_epoch.as_u64() + 1); + let target = rng.random_range(source..current_epoch.as_u64() + 1); (source, target) } else { - let source = rng.gen_range(0..max(3 * current_epoch.as_u64(), 1)); - let target = rng.gen_range(source..max(3 * current_epoch.as_u64(), source + 1)); + let source = rng.random_range(0..max(3 * current_epoch.as_u64(), 1)); + let target = rng.random_range(source..max(3 * current_epoch.as_u64(), source + 1)); (source, target) }; - let target_root = rng.gen_range(0..3); + let target_root = rng.random_range(0..3); let attestation = indexed_att(&attesting_indices, source, target, target_root); if check_slashings { @@ -101,25 +101,26 @@ fn random_test(seed: u64, mut db: SlasherDB, test_config: TestConfig) -> Slas slasher.accept_attestation(attestation); // Maybe add a random block too - if test_config.add_blocks && rng.gen_bool(0.1) { - let slot = rng.gen_range(0..1 + 3 * current_epoch.as_u64() * E::slots_per_epoch() / 2); - let proposer = rng.gen_range(0..num_validators as u64); - let block_root = rng.gen_range(0..2); + if test_config.add_blocks && rng.random_bool(0.1) { + let slot = + 
rng.random_range(0..1 + 3 * current_epoch.as_u64() * E::slots_per_epoch() / 2); + let proposer = rng.random_range(0..num_validators as u64); + let block_root = rng.random_range(0..2); slasher.accept_block_header(block(slot, proposer, block_root)); } // Maybe process - if rng.gen_bool(0.1) { + if rng.random_bool(0.1) { slasher.process_queued(current_epoch).unwrap(); // Maybe prune - if rng.gen_bool(0.1) { + if rng.random_bool(0.1) { slasher.prune_database(current_epoch).unwrap(); } } // Maybe advance to the next epoch - if rng.gen_bool(0.5) { + if rng.random_bool(0.5) { if check_slashings { slasher.process_queued(current_epoch).unwrap(); } @@ -147,10 +148,10 @@ fn random_test(seed: u64, mut db: SlasherDB, test_config: TestConfig) -> Slas #[test] #[ignore] fn no_crash() { - let mut rng = thread_rng(); + let mut rng = rng(); let (_tempdir, mut db) = make_db(); loop { - db = random_test(rng.gen(), db, TestConfig::default()); + db = random_test(rng.random(), db, TestConfig::default()); } } @@ -158,11 +159,11 @@ fn no_crash() { #[test] #[ignore] fn no_crash_with_blocks() { - let mut rng = thread_rng(); + let mut rng = rng(); let (_tempdir, mut db) = make_db(); loop { db = random_test( - rng.gen(), + rng.random(), db, TestConfig { add_blocks: true, @@ -176,11 +177,11 @@ fn no_crash_with_blocks() { #[test] #[ignore] fn check_slashings() { - let mut rng = thread_rng(); + let mut rng = rng(); let (_tempdir, mut db) = make_db(); loop { db = random_test( - rng.gen(), + rng.random(), db, TestConfig { check_slashings: true, diff --git a/slasher/tests/wrap_around.rs b/slasher/tests/wrap_around.rs index e34d0f2233c..5257bae0991 100644 --- a/slasher/tests/wrap_around.rs +++ b/slasher/tests/wrap_around.rs @@ -1,8 +1,8 @@ #![cfg(any(feature = "mdbx", feature = "lmdb", feature = "redb"))] use slasher::{ - test_utils::{chain_spec, indexed_att}, Config, Slasher, + test_utils::{chain_spec, indexed_att}, }; use tempfile::tempdir; use types::Epoch; diff --git 
a/testing/ef_tests/src/cases.rs b/testing/ef_tests/src/cases.rs index b6f7cb21a14..17b9c90ba7a 100644 --- a/testing/ef_tests/src/cases.rs +++ b/testing/ef_tests/src/cases.rs @@ -167,17 +167,13 @@ impl Cases { self.test_cases .into_par_iter() .enumerate() - .map(|(i, (ref path, ref tc))| { - CaseResult::new(i, path, tc, tc.result(i, fork_name)) - }) + .map(|(i, (path, tc))| CaseResult::new(i, path, tc, tc.result(i, fork_name))) .collect() } else { self.test_cases .iter() .enumerate() - .map(|(i, (ref path, ref tc))| { - CaseResult::new(i, path, tc, tc.result(i, fork_name)) - }) + .map(|(i, (path, tc))| CaseResult::new(i, path, tc, tc.result(i, fork_name))) .collect() } } diff --git a/testing/ef_tests/src/cases/bls_batch_verify.rs b/testing/ef_tests/src/cases/bls_batch_verify.rs index 703444c9879..f1349b06e6c 100644 --- a/testing/ef_tests/src/cases/bls_batch_verify.rs +++ b/testing/ef_tests/src/cases/bls_batch_verify.rs @@ -1,7 +1,7 @@ use super::*; use crate::case_result::compare_result; use crate::impl_bls_load_case; -use bls::{verify_signature_sets, BlsWrappedSignature, PublicKeyBytes, Signature, SignatureSet}; +use bls::{BlsWrappedSignature, PublicKeyBytes, Signature, SignatureSet, verify_signature_sets}; use serde::Deserialize; use std::borrow::Cow; use std::str::FromStr; diff --git a/testing/ef_tests/src/cases/compute_columns_for_custody_groups.rs b/testing/ef_tests/src/cases/compute_columns_for_custody_groups.rs index 8a6330d3996..2da72916cfd 100644 --- a/testing/ef_tests/src/cases/compute_columns_for_custody_groups.rs +++ b/testing/ef_tests/src/cases/compute_columns_for_custody_groups.rs @@ -1,7 +1,7 @@ use super::*; use serde::Deserialize; use std::marker::PhantomData; -use types::data_column_custody_group::{compute_columns_for_custody_group, CustodyIndex}; +use types::data_column_custody_group::{CustodyIndex, compute_columns_for_custody_group}; #[derive(Debug, Clone, Deserialize)] #[serde(bound = "E: EthSpec", deny_unknown_fields)] diff --git 
a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index 0dc5e7ab115..f143643ec36 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -4,6 +4,7 @@ use crate::case_result::compare_beacon_state_results_without_caches; use crate::decode::{ssz_decode_state, yaml_decode_file}; use crate::type_name; use serde::Deserialize; +use state_processing::EpochProcessingError; use state_processing::common::update_progressive_balances_cache::initialize_progressive_balances_cache; use state_processing::epoch_cache::initialize_epoch_cache; use state_processing::per_epoch_processing::capella::process_historical_summaries_update; @@ -11,7 +12,7 @@ use state_processing::per_epoch_processing::effective_balance_updates::{ process_effective_balance_updates, process_effective_balance_updates_slow, }; use state_processing::per_epoch_processing::single_pass::{ - process_epoch_single_pass, process_proposer_lookahead, SinglePassConfig, + SinglePassConfig, process_epoch_single_pass, process_proposer_lookahead, }; use state_processing::per_epoch_processing::{ altair, base, @@ -20,7 +21,6 @@ use state_processing::per_epoch_processing::{ process_slashings_slow, resets::{process_eth1_data_reset, process_randao_mixes_reset, process_slashings_reset}, }; -use state_processing::EpochProcessingError; use std::marker::PhantomData; use types::BeaconState; diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index bd2499aa285..1380e44acdd 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -5,20 +5,20 @@ use beacon_chain::beacon_proposer_cache::compute_proposer_duties_from_head; use beacon_chain::blob_verification::GossipBlobError; use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::chain_config::{ - DisallowedReOrgOffsets, DEFAULT_RE_ORG_HEAD_THRESHOLD, - 
DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_PARENT_THRESHOLD, + DEFAULT_RE_ORG_HEAD_THRESHOLD, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, + DEFAULT_RE_ORG_PARENT_THRESHOLD, DisallowedReOrgOffsets, }; use beacon_chain::data_column_verification::GossipVerifiedDataColumn; use beacon_chain::slot_clock::SlotClock; use beacon_chain::{ + AvailabilityProcessingStatus, BeaconChainTypes, CachedHead, ChainConfig, NotifyExecutionLayer, attestation_verification::{ - obtain_indexed_attestation_and_committees_per_slot, VerifiedAttestation, + VerifiedAttestation, obtain_indexed_attestation_and_committees_per_slot, }, blob_verification::GossipVerifiedBlob, test_utils::{BeaconChainHarness, EphemeralHarnessType}, - AvailabilityProcessingStatus, BeaconChainTypes, CachedHead, ChainConfig, NotifyExecutionLayer, }; -use execution_layer::{json_structures::JsonPayloadStatusV1Status, PayloadStatusV1}; +use execution_layer::{PayloadStatusV1, json_structures::JsonPayloadStatusV1Status}; use serde::Deserialize; use ssz_derive::Decode; use state_processing::state_advance::complete_state_advance; diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs index b458b85fdd0..bf77f5da4c3 100644 --- a/testing/ef_tests/src/cases/merkle_proof_validity.rs +++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs @@ -3,8 +3,8 @@ use crate::decode::{ssz_decode_file, ssz_decode_state, yaml_decode_file}; use serde::Deserialize; use tree_hash::Hash256; use types::{ - light_client_update, BeaconBlockBody, BeaconBlockBodyCapella, BeaconBlockBodyDeneb, - BeaconBlockBodyElectra, BeaconBlockBodyFulu, BeaconState, FixedVector, FullPayload, Unsigned, + BeaconBlockBody, BeaconBlockBodyCapella, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, + BeaconBlockBodyFulu, BeaconState, FixedVector, FullPayload, Unsigned, light_client_update, }; #[derive(Debug, Clone, Deserialize)] @@ -160,7 +160,7 @@ impl LoadCase for 
KzgInclusionMerkleProofValidity { return Err(Error::InternalError(format!( "KZG inclusion merkle proof validity test skipped for {:?}", fork_name - ))) + ))); } ForkName::Deneb => { ssz_decode_file::>(&path.join("object.ssz_snappy"))?.into() @@ -274,7 +274,7 @@ impl LoadCase for BeaconBlockBodyMerkleProofValidity { return Err(Error::InternalError(format!( "Beacon block body merkle proof validity test skipped for {:?}", fork_name - ))) + ))); } ForkName::Capella => { ssz_decode_file::>(&path.join("object.ssz_snappy"))? diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index e2c9e64ba41..379fcb1bb4d 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -10,16 +10,17 @@ use state_processing::per_block_processing::process_operations::{ process_consolidation_requests, process_deposit_requests, process_withdrawal_requests, }; use state_processing::{ + ConsensusContext, per_block_processing::{ + VerifyBlockRoot, VerifySignatures, errors::BlockProcessingError, process_block_header, process_execution_payload, process_operations::{ altair_deneb, base, process_attester_slashings, process_bls_to_execution_changes, process_deposits, process_exits, process_proposer_slashings, }, - process_sync_aggregate, process_withdrawals, VerifyBlockRoot, VerifySignatures, + process_sync_aggregate, process_withdrawals, }, - ConsensusContext, }; use std::fmt::Debug; use types::{ @@ -597,10 +598,10 @@ impl> Case for Operations { let mut state = pre_state.clone(); let mut expected = self.post.clone(); - if O::handler_name() != "withdrawals" { - if let Some(post_state) = expected.as_mut() { - post_state.build_all_committee_caches(spec).unwrap(); - } + if O::handler_name() != "withdrawals" + && let Some(post_state) = expected.as_mut() + { + post_state.build_all_committee_caches(spec).unwrap(); } let mut result = self diff --git a/testing/ef_tests/src/cases/rewards.rs 
b/testing/ef_tests/src/cases/rewards.rs index c5879f5c9cc..d6ce8be7428 100644 --- a/testing/ef_tests/src/cases/rewards.rs +++ b/testing/ef_tests/src/cases/rewards.rs @@ -7,12 +7,11 @@ use ssz::four_byte_option_impl; use ssz_derive::{Decode, Encode}; use state_processing::per_epoch_processing::base::rewards_and_penalties::ProposerRewardCalculation; use state_processing::{ + EpochProcessingError, per_epoch_processing::{ - altair, - base::{self, rewards_and_penalties::AttestationDelta, ValidatorStatuses}, - Delta, + Delta, altair, + base::{self, ValidatorStatuses, rewards_and_penalties::AttestationDelta}, }, - EpochProcessingError, }; use types::BeaconState; diff --git a/testing/ef_tests/src/cases/sanity_blocks.rs b/testing/ef_tests/src/cases/sanity_blocks.rs index 91bb995cc43..538783eaa90 100644 --- a/testing/ef_tests/src/cases/sanity_blocks.rs +++ b/testing/ef_tests/src/cases/sanity_blocks.rs @@ -4,8 +4,8 @@ use crate::case_result::compare_beacon_state_results_without_caches; use crate::decode::{ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use serde::Deserialize; use state_processing::{ - per_block_processing, per_slot_processing, BlockProcessingError, BlockSignatureStrategy, - ConsensusContext, VerifyBlockRoot, + BlockProcessingError, BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, + per_block_processing, per_slot_processing, }; use types::{BeaconState, RelativeEpoch, SignedBeaconBlock}; diff --git a/testing/ef_tests/src/cases/ssz_generic.rs b/testing/ef_tests/src/cases/ssz_generic.rs index 3aa147a74c2..1155aacb27c 100644 --- a/testing/ef_tests/src/cases/ssz_generic.rs +++ b/testing/ef_tests/src/cases/ssz_generic.rs @@ -4,8 +4,8 @@ use super::*; use crate::cases::common::{DecimalU128, DecimalU256, SszStaticType}; use crate::cases::ssz_static::{check_serialization, check_tree_hash}; use crate::decode::{context_yaml_decode_file, log_file_access, snappy_decode_file}; -use context_deserialize::{context_deserialize, ContextDeserialize}; -use 
serde::{de::Error as SerdeError, Deserialize, Deserializer}; +use context_deserialize::{ContextDeserialize, context_deserialize}; +use serde::{Deserialize, Deserializer, de::Error as SerdeError}; use ssz_derive::{Decode, Encode}; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; diff --git a/testing/ef_tests/src/cases/transition.rs b/testing/ef_tests/src/cases/transition.rs index 6d037dae87d..9fdfdbf60c5 100644 --- a/testing/ef_tests/src/cases/transition.rs +++ b/testing/ef_tests/src/cases/transition.rs @@ -3,8 +3,8 @@ use crate::case_result::compare_beacon_state_results_without_caches; use crate::decode::{ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use serde::Deserialize; use state_processing::{ - per_block_processing, state_advance::complete_state_advance, BlockSignatureStrategy, - ConsensusContext, VerifyBlockRoot, + BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, per_block_processing, + state_advance::complete_state_advance, }; use std::str::FromStr; use types::{BeaconState, Epoch, SignedBeaconBlock}; diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index ed6a20381cd..0fd2f5a0b2d 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -1,6 +1,6 @@ use crate::cases::{self, Case, Cases, EpochTransition, LoadCase, Operation}; use crate::type_name::TypeName; -use crate::{type_name, FeatureName}; +use crate::{FeatureName, type_name}; use context_deserialize::ContextDeserialize; use derivative::Derivative; use std::fs::{self, DirEntry}; diff --git a/testing/execution_engine_integration/src/genesis_json.rs b/testing/execution_engine_integration/src/genesis_json.rs index 991118478dc..e5bf90d41f8 100644 --- a/testing/execution_engine_integration/src/genesis_json.rs +++ b/testing/execution_engine_integration/src/genesis_json.rs @@ -1,4 +1,4 @@ -use serde_json::{json, Value}; +use serde_json::{Value, json}; /// Sourced from: /// diff --git 
a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index b0d115960cb..05ec0a2f191 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -1,5 +1,5 @@ use crate::execution_engine::{ - ExecutionEngine, GenericExecutionEngine, ACCOUNT1, ACCOUNT2, KEYSTORE_PASSWORD, PRIVATE_KEYS, + ACCOUNT1, ACCOUNT2, ExecutionEngine, GenericExecutionEngine, KEYSTORE_PASSWORD, PRIVATE_KEYS, }; use crate::transactions::transactions; use ethers_middleware::SignerMiddleware; @@ -11,9 +11,9 @@ use execution_layer::{ PayloadParameters, PayloadStatus, }; use fork_choice::ForkchoiceUpdateParameters; -use reqwest::{header::CONTENT_TYPE, Client}; +use reqwest::{Client, header::CONTENT_TYPE}; use sensitive_url::SensitiveUrl; -use serde_json::{json, Value}; +use serde_json::{Value, json}; use std::sync::Arc; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use task_executor::TaskExecutor; diff --git a/testing/execution_engine_integration/src/transactions.rs b/testing/execution_engine_integration/src/transactions.rs index fd458ad205d..b6111426b67 100644 --- a/testing/execution_engine_integration/src/transactions.rs +++ b/testing/execution_engine_integration/src/transactions.rs @@ -1,7 +1,7 @@ -use deposit_contract::{encode_eth1_tx_data, BYTECODE, CONTRACT_DEPLOY_GAS, DEPOSIT_GAS}; +use deposit_contract::{BYTECODE, CONTRACT_DEPLOY_GAS, DEPOSIT_GAS, encode_eth1_tx_data}; use ethers_core::types::{ - transaction::{eip2718::TypedTransaction, eip2930::AccessList}, Address, Bytes, Eip1559TransactionRequest, TransactionRequest, U256, + transaction::{eip2718::TypedTransaction, eip2930::AccessList}, }; use types::{DepositData, EthSpec, FixedBytesExtended, Hash256, Keypair, Signature}; diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs index 4021a6d2c50..df191ed5af7 100644 --- a/testing/node_test_rig/src/lib.rs +++ 
b/testing/node_test_rig/src/lib.rs @@ -4,7 +4,7 @@ use beacon_node::ProductionBeaconNode; use environment::RuntimeContext; -use eth2::{reqwest::ClientBuilder, BeaconNodeHttpClient, Timeouts}; +use eth2::{BeaconNodeHttpClient, Timeouts, reqwest::ClientBuilder}; use sensitive_url::SensitiveUrl; use std::path::PathBuf; use std::time::Duration; diff --git a/testing/simulator/src/basic_sim.rs b/testing/simulator/src/basic_sim.rs index ae9f6c0cc6f..b47832ece76 100644 --- a/testing/simulator/src/basic_sim.rs +++ b/testing/simulator/src/basic_sim.rs @@ -1,13 +1,14 @@ use crate::local_network::LocalNetworkParams; use crate::local_network::TERMINAL_BLOCK; -use crate::{checks, LocalNetwork}; +use crate::{LocalNetwork, checks}; use clap::ArgMatches; use crate::retry::with_retry; use futures::prelude::*; use node_test_rig::{ + ApiTopic, ValidatorFiles, environment::{EnvironmentBuilder, LoggerConfig}, - testing_validator_config, ApiTopic, ValidatorFiles, + testing_validator_config, }; use rayon::prelude::*; use std::cmp::max; @@ -17,7 +18,7 @@ use std::time::Duration; use environment::tracing_common; use tracing_subscriber::prelude::*; -use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter}; +use tracing_subscriber::{EnvFilter, layer::SubscriberExt, util::SubscriberInitExt}; use logging::build_workspace_filter; use tokio::time::sleep; diff --git a/testing/simulator/src/checks.rs b/testing/simulator/src/checks.rs index e7cc9b7a4ee..1368c495cd8 100644 --- a/testing/simulator/src/checks.rs +++ b/testing/simulator/src/checks.rs @@ -312,7 +312,9 @@ pub(crate) async fn verify_light_client_updates( .signature_slot(); let signature_slot_distance = slot - signature_slot; if signature_slot_distance > light_client_update_slot_tolerance { - return Err(format!("Existing optimistic update too old: signature slot {signature_slot}, current slot {slot:?}")); + return Err(format!( + "Existing optimistic update too old: signature slot {signature_slot}, current slot 
{slot:?}" + )); } // Verify light client finality update. `signature_slot_distance` should be 1 in the ideal scenario. diff --git a/testing/simulator/src/cli.rs b/testing/simulator/src/cli.rs index 1fa59df4fe1..70c4680e928 100644 --- a/testing/simulator/src/cli.rs +++ b/testing/simulator/src/cli.rs @@ -1,4 +1,4 @@ -use clap::{crate_version, Arg, ArgAction, Command}; +use clap::{Arg, ArgAction, Command, crate_version}; pub fn cli_app() -> Command { Command::new("simulator") diff --git a/testing/simulator/src/fallback_sim.rs b/testing/simulator/src/fallback_sim.rs index f60ce5fc090..58875a985a0 100644 --- a/testing/simulator/src/fallback_sim.rs +++ b/testing/simulator/src/fallback_sim.rs @@ -1,5 +1,5 @@ use crate::local_network::LocalNetworkParams; -use crate::{checks, LocalNetwork}; +use crate::{LocalNetwork, checks}; use clap::ArgMatches; use crate::retry::with_retry; @@ -7,8 +7,9 @@ use environment::tracing_common; use futures::prelude::*; use logging::build_workspace_filter; use node_test_rig::{ + ValidatorFiles, environment::{EnvironmentBuilder, LoggerConfig}, - testing_validator_config, ValidatorFiles, + testing_validator_config, }; use rayon::prelude::*; use std::cmp::max; @@ -18,7 +19,7 @@ use std::time::Duration; use tokio::time::sleep; use tracing::Level; use tracing_subscriber::prelude::*; -use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter}; +use tracing_subscriber::{EnvFilter, layer::SubscriberExt, util::SubscriberInitExt}; use types::{Epoch, EthSpec, MinimalEthSpec}; const END_EPOCH: u64 = 16; const GENESIS_DELAY: u64 = 38; diff --git a/testing/simulator/src/local_network.rs b/testing/simulator/src/local_network.rs index 3914d33f936..1f96004d1cf 100644 --- a/testing/simulator/src/local_network.rs +++ b/testing/simulator/src/local_network.rs @@ -1,10 +1,11 @@ use crate::checks::epoch_delay; use kzg::trusted_setup::get_trusted_setup; use node_test_rig::{ + ClientConfig, ClientGenesis, LocalBeaconNode, LocalExecutionNode, 
LocalValidatorClient, + MockExecutionConfig, MockServerConfig, ValidatorConfig, ValidatorFiles, environment::RuntimeContext, - eth2::{types::StateId, BeaconNodeHttpClient}, - testing_client_config, ClientConfig, ClientGenesis, LocalBeaconNode, LocalExecutionNode, - LocalValidatorClient, MockExecutionConfig, MockServerConfig, ValidatorConfig, ValidatorFiles, + eth2::{BeaconNodeHttpClient, types::StateId}, + testing_client_config, }; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; diff --git a/testing/simulator/src/retry.rs b/testing/simulator/src/retry.rs index ad85b74236c..dea132c9da7 100644 --- a/testing/simulator/src/retry.rs +++ b/testing/simulator/src/retry.rs @@ -32,11 +32,7 @@ mod tests { use std::collections::VecDeque; async fn my_async_func(is_ok: bool) -> Result<(), ()> { - if is_ok { - Ok(()) - } else { - Err(()) - } + if is_ok { Ok(()) } else { Err(()) } } #[tokio::test] diff --git a/testing/state_transition_vectors/src/exit.rs b/testing/state_transition_vectors/src/exit.rs index 61cae6dbe1b..f8ece0218f5 100644 --- a/testing/state_transition_vectors/src/exit.rs +++ b/testing/state_transition_vectors/src/exit.rs @@ -1,7 +1,7 @@ use super::*; use state_processing::{ - per_block_processing, per_block_processing::errors::ExitInvalid, BlockProcessingError, - BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, + BlockProcessingError, BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, + per_block_processing, per_block_processing::errors::ExitInvalid, }; use types::{BeaconBlock, Epoch}; diff --git a/testing/state_transition_vectors/src/main.rs b/testing/state_transition_vectors/src/main.rs index 7f0f697d61d..4a829b68035 100644 --- a/testing/state_transition_vectors/src/main.rs +++ b/testing/state_transition_vectors/src/main.rs @@ -11,7 +11,7 @@ use std::path::{Path, PathBuf}; use std::process::exit; use std::sync::LazyLock; use types::{ - test_utils::generate_deterministic_keypairs, BeaconState, EthSpec, Keypair, SignedBeaconBlock, + 
BeaconState, EthSpec, Keypair, SignedBeaconBlock, test_utils::generate_deterministic_keypairs, }; use types::{FixedBytesExtended, Hash256, MainnetEthSpec, Slot}; diff --git a/testing/web3signer_tests/src/get_web3signer.rs b/testing/web3signer_tests/src/get_web3signer.rs index 8c46a07a7dd..0c3d9b02db5 100644 --- a/testing/web3signer_tests/src/get_web3signer.rs +++ b/testing/web3signer_tests/src/get_web3signer.rs @@ -28,7 +28,10 @@ pub async fn download_binary(dest_dir: PathBuf) { // Download the release zip. let client = Client::builder().build().unwrap(); - let zip_url = format!("https://artifacts.consensys.net/public/web3signer/raw/names/web3signer.zip/versions/{}/web3signer-{}.zip", version, version); + let zip_url = format!( + "https://artifacts.consensys.net/public/web3signer/raw/names/web3signer.zip/versions/{}/web3signer-{}.zip", + version, version + ); let zip_response = client .get(zip_url) .send() diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index 4bc0f623461..15ec745e3f1 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -24,13 +24,13 @@ mod tests { use eth2_keystore::KeystoreBuilder; use eth2_network_config::Eth2NetworkConfig; use initialized_validators::{ - load_pem_certificate, load_pkcs12_identity, InitializedValidators, + InitializedValidators, load_pem_certificate, load_pkcs12_identity, }; use lighthouse_validator_store::LighthouseValidatorStore; use parking_lot::Mutex; use reqwest::Client; use serde::Serialize; - use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; + use slashing_protection::{SLASHING_PROTECTION_FILENAME, SlashingDatabase}; use slot_clock::{SlotClock, TestingSlotClock}; use std::env; use std::fmt::Debug; @@ -41,7 +41,7 @@ mod tests { use std::sync::{Arc, LazyLock}; use std::time::{Duration, Instant}; use task_executor::TaskExecutor; - use tempfile::{tempdir, TempDir}; + use tempfile::{TempDir, tempdir}; use 
tokio::sync::OnceCell; use tokio::time::sleep; use types::{attestation::AttestationBase, *}; diff --git a/validator_client/beacon_node_fallback/src/lib.rs b/validator_client/beacon_node_fallback/src/lib.rs index b3158cd380c..e5f4f1ab4c5 100644 --- a/validator_client/beacon_node_fallback/src/lib.rs +++ b/validator_client/beacon_node_fallback/src/lib.rs @@ -4,14 +4,14 @@ pub mod beacon_node_health; use beacon_node_health::{ - check_node_health, BeaconNodeHealth, BeaconNodeSyncDistanceTiers, ExecutionEngineHealth, - IsOptimistic, SyncDistanceTier, + BeaconNodeHealth, BeaconNodeSyncDistanceTiers, ExecutionEngineHealth, IsOptimistic, + SyncDistanceTier, check_node_health, }; use clap::ValueEnum; use eth2::{BeaconNodeHttpClient, Timeouts}; use futures::future; use sensitive_url::SensitiveUrl; -use serde::{ser::SerializeStruct, Deserialize, Serialize, Serializer}; +use serde::{Deserialize, Serialize, Serializer, ser::SerializeStruct}; use slot_clock::SlotClock; use std::cmp::Ordering; use std::fmt; @@ -25,7 +25,7 @@ use task_executor::TaskExecutor; use tokio::{sync::RwLock, time::sleep}; use tracing::{debug, error, warn}; use types::{ChainSpec, Config as ConfigSpec, EthSpec, Slot}; -use validator_metrics::{inc_counter_vec, ENDPOINT_ERRORS, ENDPOINT_REQUESTS}; +use validator_metrics::{ENDPOINT_ERRORS, ENDPOINT_REQUESTS, inc_counter_vec}; /// Message emitted when the VC detects the BN is using a different spec. 
const UPDATE_REQUIRED_LOG_HINT: &str = "this VC or the remote BN may need updating"; @@ -783,10 +783,12 @@ mod tests { let mut variants = ApiTopic::VARIANTS.to_vec(); variants.retain(|s| *s != "none"); assert_eq!(all.len(), variants.len()); - assert!(variants - .iter() - .map(|topic| ApiTopic::from_str(topic, true).unwrap()) - .eq(all.into_iter())); + assert!( + variants + .iter() + .map(|topic| ApiTopic::from_str(topic, true).unwrap()) + .eq(all.into_iter()) + ); } #[tokio::test] diff --git a/validator_client/doppelganger_service/src/lib.rs b/validator_client/doppelganger_service/src/lib.rs index e3c7ce78b44..b0ed78e9965 100644 --- a/validator_client/doppelganger_service/src/lib.rs +++ b/validator_client/doppelganger_service/src/lib.rs @@ -261,8 +261,8 @@ impl DoppelgangerService { continue; } - if let Some(slot) = slot_clock.now() { - if let Err(e) = service + if let Some(slot) = slot_clock.now() + && let Err(e) = service .detect_doppelgangers::( slot, &get_index, @@ -270,12 +270,11 @@ impl DoppelgangerService { &mut shutdown_func, ) .await - { - error!( - error = ?e, - "Error during doppelganger detection" - ); - } + { + error!( + error = ?e, + "Error during doppelganger detection" + ); } } }, @@ -603,8 +602,8 @@ mod test { use std::future; use std::time::Duration; use types::{ - test_utils::{SeedableRng, TestRandom, XorShiftRng}, MainnetEthSpec, + test_utils::{SeedableRng, TestRandom, XorShiftRng}, }; use validator_store::DoppelgangerStatus; diff --git a/validator_client/graffiti_file/src/lib.rs b/validator_client/graffiti_file/src/lib.rs index 86f582aa38e..8b5637d09ed 100644 --- a/validator_client/graffiti_file/src/lib.rs +++ b/validator_client/graffiti_file/src/lib.rs @@ -2,11 +2,11 @@ use bls::PublicKeyBytes; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::fs::File; -use std::io::{prelude::*, BufReader}; +use std::io::{BufReader, prelude::*}; use std::path::PathBuf; use std::str::FromStr; use tracing::warn; -use 
types::{graffiti::GraffitiString, Graffiti}; +use types::{Graffiti, graffiti::GraffitiString}; #[derive(Debug)] #[allow(clippy::enum_variant_names)] diff --git a/validator_client/http_api/src/api_secret.rs b/validator_client/http_api/src/api_secret.rs index bac54dc8b24..2241d791bcd 100644 --- a/validator_client/http_api/src/api_secret.rs +++ b/validator_client/http_api/src/api_secret.rs @@ -1,6 +1,6 @@ use filesystem::create_with_600_perms; -use rand::distributions::Alphanumeric; -use rand::{thread_rng, Rng}; +use rand::distr::Alphanumeric; +use rand::{Rng, rng}; use std::fs; use std::path::{Path, PathBuf}; use warp::Filter; @@ -58,7 +58,7 @@ impl ApiSecret { } let length = PK_LEN; - let pk: String = thread_rng() + let pk: String = rng() .sample_iter(&Alphanumeric) .take(length) .map(char::from) diff --git a/validator_client/http_api/src/create_validator.rs b/validator_client/http_api/src/create_validator.rs index 278274198d5..e4aff34dbc3 100644 --- a/validator_client/http_api/src/create_validator.rs +++ b/validator_client/http_api/src/create_validator.rs @@ -1,7 +1,7 @@ use account_utils::validator_definitions::{PasswordStorage, ValidatorDefinition}; use account_utils::{ eth2_keystore::Keystore, - eth2_wallet::{bip39::Mnemonic, WalletBuilder}, + eth2_wallet::{WalletBuilder, bip39::Mnemonic}, random_mnemonic, random_password, }; use eth2::lighthouse_vc::types::{self as api_types}; @@ -9,7 +9,7 @@ use lighthouse_validator_store::LighthouseValidatorStore; use slot_clock::SlotClock; use std::path::{Path, PathBuf}; use types::{ChainSpec, EthSpec}; -use validator_dir::{keystore_password_path, Builder as ValidatorDirBuilder}; +use validator_dir::{Builder as ValidatorDirBuilder, keystore_password_path}; use zeroize::Zeroizing; /// Create some validator EIP-2335 keystores and store them on disk. 
Then, enroll the validators in diff --git a/validator_client/http_api/src/graffiti.rs b/validator_client/http_api/src/graffiti.rs index 4372b14b04a..3cc898435de 100644 --- a/validator_client/http_api/src/graffiti.rs +++ b/validator_client/http_api/src/graffiti.rs @@ -2,7 +2,7 @@ use bls::PublicKey; use lighthouse_validator_store::LighthouseValidatorStore; use slot_clock::SlotClock; use std::sync::Arc; -use types::{graffiti::GraffitiString, EthSpec, Graffiti}; +use types::{EthSpec, Graffiti, graffiti::GraffitiString}; pub fn get_graffiti( validator_pubkey: PublicKey, diff --git a/validator_client/http_api/src/keystores.rs b/validator_client/http_api/src/keystores.rs index 302b21d7d8e..c0f918f9bb8 100644 --- a/validator_client/http_api/src/keystores.rs +++ b/validator_client/http_api/src/keystores.rs @@ -19,7 +19,7 @@ use task_executor::TaskExecutor; use tokio::runtime::Handle; use tracing::{info, warn}; use types::{EthSpec, PublicKeyBytes}; -use validator_dir::{keystore_password_path, Builder as ValidatorDirBuilder}; +use validator_dir::{Builder as ValidatorDirBuilder, keystore_password_path}; use warp::Rejection; use warp_utils::reject::{custom_bad_request, custom_server_error}; use zeroize::Zeroizing; @@ -79,7 +79,7 @@ pub fn import( let slashing_protection_status = if let Some(InterchangeJsonStr(slashing_protection)) = request.slashing_protection { // Warn for missing slashing protection. 
- for KeystoreJsonStr(ref keystore) in &request.keystores { + for KeystoreJsonStr(keystore) in &request.keystores { if let Some(public_key) = keystore.public_key() { let pubkey_bytes = public_key.compress(); if !slashing_protection diff --git a/validator_client/http_api/src/lib.rs b/validator_client/http_api/src/lib.rs index 02a677212cb..4494fca9574 100644 --- a/validator_client/http_api/src/lib.rs +++ b/validator_client/http_api/src/lib.rs @@ -12,7 +12,7 @@ pub use api_secret::PK_FILENAME; use graffiti::{delete_graffiti, get_graffiti, set_graffiti}; use create_signed_voluntary_exit::create_signed_voluntary_exit; -use graffiti_file::{determine_graffiti, GraffitiFile}; +use graffiti_file::{GraffitiFile, determine_graffiti}; use lighthouse_validator_store::LighthouseValidatorStore; use validator_store::ValidatorStore; @@ -36,8 +36,8 @@ use eth2::lighthouse_vc::{ }; use health_metrics::observe::Observe; use lighthouse_version::version_with_platform; -use logging::crit; use logging::SSELoggingComponents; +use logging::crit; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; use serde::{Deserialize, Serialize}; @@ -50,12 +50,12 @@ use std::sync::Arc; use sysinfo::{System, SystemExt}; use system_health::observe_system_health_vc; use task_executor::TaskExecutor; -use tokio_stream::{wrappers::BroadcastStream, StreamExt}; +use tokio_stream::{StreamExt, wrappers::BroadcastStream}; use tracing::{info, warn}; use types::{ChainSpec, ConfigAndPreset, EthSpec}; use validator_dir::Builder as ValidatorDirBuilder; use validator_services::block_service::BlockService; -use warp::{reply::Response, sse::Event, Filter}; +use warp::{Filter, reply::Response, sse::Event}; use warp_utils::reject::convert_rejection; use warp_utils::task::blocking_json_task; @@ -883,7 +883,7 @@ pub fn serve( return convert_rejection::(Err( warp_utils::reject::custom_bad_request(e.to_string()), )) - .await + .await; } } } diff --git a/validator_client/http_api/src/test_utils.rs 
b/validator_client/http_api/src/test_utils.rs index 53bcf7baebb..b15f08578dd 100644 --- a/validator_client/http_api/src/test_utils.rs +++ b/validator_client/http_api/src/test_utils.rs @@ -7,24 +7,24 @@ use account_utils::{ use deposit_contract::decode_eth1_tx_data; use doppelganger_service::DoppelgangerService; use eth2::{ + Error as ApiError, lighthouse_vc::{http_client::ValidatorClientHttpClient, types::*}, types::ErrorMessage as ApiErrorMessage, - Error as ApiError, }; use eth2_keystore::KeystoreBuilder; -use initialized_validators::key_cache::{KeyCache, CACHE_FILENAME}; +use initialized_validators::key_cache::{CACHE_FILENAME, KeyCache}; use initialized_validators::{InitializedValidators, OnDecryptFailure}; use lighthouse_validator_store::{Config as ValidatorStoreConfig, LighthouseValidatorStore}; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; -use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; +use slashing_protection::{SLASHING_PROTECTION_FILENAME, SlashingDatabase}; use slot_clock::{SlotClock, TestingSlotClock}; use std::future::Future; use std::net::{IpAddr, Ipv4Addr}; use std::sync::Arc; use std::time::Duration; use task_executor::test_utils::TestRuntime; -use tempfile::{tempdir, TempDir}; +use tempfile::{TempDir, tempdir}; use tokio::sync::oneshot; use types::ChainSpec; use validator_services::block_service::BlockService; @@ -370,9 +370,11 @@ impl ApiTester { // Ensure the server lists all of these newly created validators. 
for validator in &response { - assert!(server_vals - .iter() - .any(|server_val| server_val.voting_pubkey == validator.voting_pubkey)); + assert!( + server_vals + .iter() + .any(|server_val| server_val.voting_pubkey == validator.voting_pubkey) + ); } /* @@ -569,16 +571,17 @@ impl ApiTester { enabled ); - assert!(self - .client - .get_lighthouse_validators() - .await - .unwrap() - .data - .into_iter() - .find(|v| v.voting_pubkey == validator.voting_pubkey) - .map(|v| v.enabled == enabled) - .unwrap()); + assert!( + self.client + .get_lighthouse_validators() + .await + .unwrap() + .data + .into_iter() + .find(|v| v.voting_pubkey == validator.voting_pubkey) + .map(|v| v.enabled == enabled) + .unwrap() + ); // Check the server via an individual request. assert_eq!( diff --git a/validator_client/http_api/src/tests.rs b/validator_client/http_api/src/tests.rs index b021186e77a..c9a59521c57 100644 --- a/validator_client/http_api/src/tests.rs +++ b/validator_client/http_api/src/tests.rs @@ -13,15 +13,15 @@ use account_utils::{ }; use deposit_contract::decode_eth1_tx_data; use eth2::{ + Error as ApiError, lighthouse_vc::{http_client::ValidatorClientHttpClient, types::*}, types::ErrorMessage as ApiErrorMessage, - Error as ApiError, }; use eth2_keystore::KeystoreBuilder; use lighthouse_validator_store::{Config as ValidatorStoreConfig, LighthouseValidatorStore}; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; -use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; +use slashing_protection::{SLASHING_PROTECTION_FILENAME, SlashingDatabase}; use slot_clock::{SlotClock, TestingSlotClock}; use std::future::Future; use std::net::{IpAddr, Ipv4Addr}; @@ -29,7 +29,7 @@ use std::str::FromStr; use std::sync::Arc; use std::time::Duration; use task_executor::test_utils::TestRuntime; -use tempfile::{tempdir, TempDir}; +use tempfile::{TempDir, tempdir}; use types::graffiti::GraffitiString; use validator_store::ValidatorStore; use zeroize::Zeroizing; @@ -330,9 
+330,11 @@ impl ApiTester { // Ensure the server lists all of these newly created validators. for validator in &response { - assert!(server_vals - .iter() - .any(|server_val| server_val.voting_pubkey == validator.voting_pubkey)); + assert!( + server_vals + .iter() + .any(|server_val| server_val.voting_pubkey == validator.voting_pubkey) + ); } /* @@ -556,16 +558,17 @@ impl ApiTester { enabled ); - assert!(self - .client - .get_lighthouse_validators() - .await - .unwrap() - .data - .into_iter() - .find(|v| v.voting_pubkey == validator.voting_pubkey) - .map(|v| v.enabled == enabled) - .unwrap()); + assert!( + self.client + .get_lighthouse_validators() + .await + .unwrap() + .data + .into_iter() + .find(|v| v.voting_pubkey == validator.voting_pubkey) + .map(|v| v.enabled == enabled) + .unwrap() + ); // Check the server via an individual request. assert_eq!( diff --git a/validator_client/http_api/src/tests/keystores.rs b/validator_client/http_api/src/tests/keystores.rs index 37f7513f379..a3c6cb4be32 100644 --- a/validator_client/http_api/src/tests/keystores.rs +++ b/validator_client/http_api/src/tests/keystores.rs @@ -9,11 +9,12 @@ use eth2::lighthouse_vc::{ }; use itertools::Itertools; use lighthouse_validator_store::DEFAULT_GAS_LIMIT; -use rand::{rngs::SmallRng, Rng, SeedableRng}; +use rand::rngs::StdRng; +use rand::{Rng, SeedableRng}; use slashing_protection::interchange::{Interchange, InterchangeMetadata}; use std::{collections::HashMap, path::Path}; use tokio::runtime::Handle; -use types::{attestation::AttestationBase, Address}; +use types::{Address, attestation::AttestationBase}; use validator_store::ValidatorStore; use zeroize::Zeroizing; @@ -1124,11 +1125,14 @@ async fn generic_migration_test( delete_indices.len() ); for &i in &delete_indices { - assert!(delete_res - .slashing_protection - .data - .iter() - .any(|interchange_data| interchange_data.pubkey == keystore_pubkey(&keystores[i]))); + assert!( + delete_res + .slashing_protection + .data + .iter() + 
.any(|interchange_data| interchange_data.pubkey + == keystore_pubkey(&keystores[i])) + ); } // Filter slashing protection according to `slashing_protection_indices`. @@ -1324,13 +1328,13 @@ async fn delete_concurrent_with_signing() { let all_pubkeys = all_pubkeys.clone(); let handle = handle.spawn(async move { - let mut rng = SmallRng::from_entropy(); + let mut rng: StdRng = SeedableRng::from_os_rng(); let mut slashing_protection = vec![]; for _ in 0..num_delete_attempts { let to_delete = all_pubkeys .iter() - .filter(|_| rng.gen_bool(delete_prob)) + .filter(|_| rng.random_bool(delete_prob)) .copied() .collect::>(); diff --git a/validator_client/http_metrics/src/lib.rs b/validator_client/http_metrics/src/lib.rs index 74419399576..70b447a4939 100644 --- a/validator_client/http_metrics/src/lib.rs +++ b/validator_client/http_metrics/src/lib.rs @@ -16,7 +16,7 @@ use std::time::{SystemTime, UNIX_EPOCH}; use tracing::info; use types::EthSpec; use validator_services::duties_service::DutiesService; -use warp::{http::Response, Filter}; +use warp::{Filter, http::Response}; #[derive(Debug)] pub enum Error { @@ -169,34 +169,34 @@ pub fn gather_prometheus_metrics( { let shared = ctx.shared.read(); - if let Some(genesis_time) = shared.genesis_time { - if let Ok(now) = SystemTime::now().duration_since(UNIX_EPOCH) { - let distance = now.as_secs() as i64 - genesis_time as i64; - set_gauge(&GENESIS_DISTANCE, distance); - } + if let Some(genesis_time) = shared.genesis_time + && let Ok(now) = SystemTime::now().duration_since(UNIX_EPOCH) + { + let distance = now.as_secs() as i64 - genesis_time as i64; + set_gauge(&GENESIS_DISTANCE, distance); } - if let Some(duties_service) = &shared.duties_service { - if let Some(slot) = duties_service.slot_clock.now() { - let current_epoch = slot.epoch(E::slots_per_epoch()); - let next_epoch = current_epoch + 1; - - set_int_gauge( - &PROPOSER_COUNT, - &[CURRENT_EPOCH], - duties_service.proposer_count(current_epoch) as i64, - ); - set_int_gauge( - 
&ATTESTER_COUNT, - &[CURRENT_EPOCH], - duties_service.attester_count(current_epoch) as i64, - ); - set_int_gauge( - &ATTESTER_COUNT, - &[NEXT_EPOCH], - duties_service.attester_count(next_epoch) as i64, - ); - } + if let Some(duties_service) = &shared.duties_service + && let Some(slot) = duties_service.slot_clock.now() + { + let current_epoch = slot.epoch(E::slots_per_epoch()); + let next_epoch = current_epoch + 1; + + set_int_gauge( + &PROPOSER_COUNT, + &[CURRENT_EPOCH], + duties_service.proposer_count(current_epoch) as i64, + ); + set_int_gauge( + &ATTESTER_COUNT, + &[CURRENT_EPOCH], + duties_service.attester_count(current_epoch) as i64, + ); + set_int_gauge( + &ATTESTER_COUNT, + &[NEXT_EPOCH], + duties_service.attester_count(next_epoch) as i64, + ); } } diff --git a/validator_client/initialized_validators/src/key_cache.rs b/validator_client/initialized_validators/src/key_cache.rs index 053eaafb7e5..a5a481923dd 100644 --- a/validator_client/initialized_validators/src/key_cache.rs +++ b/validator_client/initialized_validators/src/key_cache.rs @@ -5,8 +5,8 @@ use eth2_keystore::json_keystore::{ Sha256Checksum, }; use eth2_keystore::{ - decrypt, default_kdf, encrypt, keypair_from_secret, Error as KeystoreError, PlainText, Uuid, - ZeroizeHash, IV_SIZE, SALT_SIZE, + Error as KeystoreError, IV_SIZE, PlainText, SALT_SIZE, Uuid, ZeroizeHash, decrypt, default_kdf, + encrypt, keypair_from_secret, }; use rand::prelude::*; use serde::{Deserialize, Serialize}; @@ -65,8 +65,8 @@ impl KeyCache { } pub fn init_crypto() -> Crypto { - let salt = rand::thread_rng().gen::<[u8; SALT_SIZE]>(); - let iv = rand::thread_rng().gen::<[u8; IV_SIZE]>().to_vec().into(); + let salt = rand::rng().random::<[u8; SALT_SIZE]>(); + let iv = rand::rng().random::<[u8; IV_SIZE]>().to_vec().into(); let kdf = default_kdf(salt.to_vec()); let cipher = Cipher::Aes128Ctr(Aes128Ctr { iv }); diff --git a/validator_client/initialized_validators/src/lib.rs b/validator_client/initialized_validators/src/lib.rs 
index 957430fa57a..4d61bd4ed81 100644 --- a/validator_client/initialized_validators/src/lib.rs +++ b/validator_client/initialized_validators/src/lib.rs @@ -11,8 +11,8 @@ pub mod key_cache; use account_utils::{ read_password, read_password_from_user, read_password_string, validator_definitions::{ - self, SigningDefinition, ValidatorDefinition, ValidatorDefinitions, Web3SignerDefinition, - CONFIG_FILENAME, + self, CONFIG_FILENAME, SigningDefinition, ValidatorDefinition, ValidatorDefinitions, + Web3SignerDefinition, }, }; use eth2_keystore::Keystore; @@ -162,7 +162,7 @@ impl InitializedValidator { pub fn keystore_lockfile(&self) -> Option> { match self.signing_method.as_ref() { SigningMethod::LocalKeystore { - ref voting_keystore_lockfile, + voting_keystore_lockfile, .. } => MutexGuard::try_map(voting_keystore_lockfile.lock(), |option_lockfile| { option_lockfile.as_mut() @@ -671,20 +671,19 @@ impl InitializedValidators { // 3. Delete from `self.validators`, which holds the signing method. // Delete the keystore files. - if let Some(initialized_validator) = self.validators.remove(&pubkey.compress()) { - if let SigningMethod::LocalKeystore { + if let Some(initialized_validator) = self.validators.remove(&pubkey.compress()) + && let SigningMethod::LocalKeystore { ref voting_keystore_path, ref voting_keystore_lockfile, ref voting_keystore, .. } = *initialized_validator.signing_method - { - // Drop the lock file so that it may be deleted. This is particularly important on - // Windows where the lockfile will fail to be deleted if it is still open. - drop(voting_keystore_lockfile.lock().take()); + { + // Drop the lock file so that it may be deleted. This is particularly important on + // Windows where the lockfile will fail to be deleted if it is still open. 
+ drop(voting_keystore_lockfile.lock().take()); - self.delete_keystore_or_validator_dir(voting_keystore_path, voting_keystore)?; - } + self.delete_keystore_or_validator_dir(voting_keystore_path, voting_keystore)?; } // 4. Delete from validator definitions entirely. @@ -695,17 +694,16 @@ impl InitializedValidators { .map_err(Error::UnableToSaveDefinitions)?; // 5. Delete the keystore password if it's not being used by any definition. - if let Some(password_path) = password_path_opt.and_then(|p| p.canonicalize().ok()) { - if self + if let Some(password_path) = password_path_opt.and_then(|p| p.canonicalize().ok()) + && self .definitions .iter_voting_keystore_password_paths() // Require canonicalized paths so we can do a true equality check. .filter_map(|existing| existing.canonicalize().ok()) .all(|existing| existing != password_path) - { - fs::remove_file(&password_path) - .map_err(|e| Error::UnableToDeletePasswordFile(password_path, e))?; - } + { + fs::remove_file(&password_path) + .map_err(|e| Error::UnableToDeletePasswordFile(password_path, e))?; } Ok(keystore_and_password) @@ -723,14 +721,13 @@ impl InitializedValidators { // If the parent directory is a `ValidatorDir` within `self.validators_dir`, then // delete the entire directory so that it may be recreated if the keystore is // re-imported. - if let Some(validator_dir) = voting_keystore_path.parent() { - if validator_dir + if let Some(validator_dir) = voting_keystore_path.parent() + && validator_dir == ValidatorDirBuilder::get_dir_path(&self.validators_dir, voting_keystore) - { - fs::remove_dir_all(validator_dir) - .map_err(|e| Error::UnableToDeleteValidatorDir(validator_dir.into(), e))?; - return Ok(()); - } + { + fs::remove_dir_all(validator_dir) + .map_err(|e| Error::UnableToDeleteValidatorDir(validator_dir.into(), e))?; + return Ok(()); } // Otherwise just delete the keystore file. 
fs::remove_file(voting_keystore_path) @@ -1415,7 +1412,7 @@ impl InitializedValidators { for def in self.definitions.as_mut_slice() { match &mut def.signing_definition { SigningDefinition::LocalKeystore { - ref mut voting_keystore_password, + voting_keystore_password, .. } => { if let Some(password) = voting_keystore_password.take() { diff --git a/validator_client/lighthouse_validator_store/src/lib.rs b/validator_client/lighthouse_validator_store/src/lib.rs index 67af1d73fed..ed1ffa6bf6f 100644 --- a/validator_client/lighthouse_validator_store/src/lib.rs +++ b/validator_client/lighthouse_validator_store/src/lib.rs @@ -8,7 +8,7 @@ use serde::{Deserialize, Serialize}; use signing_method::Error as SigningError; use signing_method::{SignableMessage, SigningContext, SigningMethod}; use slashing_protection::{ - interchange::Interchange, InterchangeError, NotSafe, Safe, SlashingDatabase, + InterchangeError, NotSafe, Safe, SlashingDatabase, interchange::Interchange, }; use slot_clock::SlotClock; use std::marker::PhantomData; @@ -17,13 +17,13 @@ use std::sync::Arc; use task_executor::TaskExecutor; use tracing::{error, info, warn}; use types::{ - graffiti::GraffitiString, AbstractExecPayload, Address, AggregateAndProof, Attestation, - BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof, Domain, Epoch, EthSpec, Fork, - Graffiti, Hash256, PublicKeyBytes, SelectionProof, Signature, SignedAggregateAndProof, - SignedBeaconBlock, SignedContributionAndProof, SignedRoot, SignedValidatorRegistrationData, - SignedVoluntaryExit, Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, - SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, - VoluntaryExit, + AbstractExecPayload, Address, AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, + ChainSpec, ContributionAndProof, Domain, Epoch, EthSpec, Fork, Graffiti, Hash256, + PublicKeyBytes, SelectionProof, Signature, SignedAggregateAndProof, SignedBeaconBlock, + 
SignedContributionAndProof, SignedRoot, SignedValidatorRegistrationData, SignedVoluntaryExit, + Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, SyncCommitteeMessage, + SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, VoluntaryExit, + graffiti::GraffitiString, }; use validator_store::{ DoppelgangerStatus, Error as ValidatorStoreError, ProposalData, SignedBlock, UnsignedBlock, @@ -693,11 +693,7 @@ impl ValidatorStore for LighthouseValidatorS // If builder boost factor is set to 100 it should be treated // as None to prevent unnecessary calculations that could // lead to loss of information. - if factor == 100 { - None - } else { - Some(factor) - } + if factor == 100 { None } else { Some(factor) } }) } diff --git a/validator_client/signing_method/src/lib.rs b/validator_client/signing_method/src/lib.rs index 316c1d2205c..c535415b1e9 100644 --- a/validator_client/signing_method/src/lib.rs +++ b/validator_client/signing_method/src/lib.rs @@ -6,7 +6,7 @@ use eth2_keystore::Keystore; use lockfile::Lockfile; use parking_lot::Mutex; -use reqwest::{header::ACCEPT, Client}; +use reqwest::{Client, header::ACCEPT}; use std::path::PathBuf; use std::sync::Arc; use task_executor::TaskExecutor; diff --git a/validator_client/slashing_protection/src/attestation_tests.rs b/validator_client/slashing_protection/src/attestation_tests.rs index b577ccd9d85..37766f271bb 100644 --- a/validator_client/slashing_protection/src/attestation_tests.rs +++ b/validator_client/slashing_protection/src/attestation_tests.rs @@ -159,8 +159,10 @@ fn valid_multiple_validators_not_surrounding() { #[test] fn invalid_source_exceeds_target() { StreamTest { - cases: vec![Test::single(attestation_data_builder(1, 0)) - .expect_invalid_att(InvalidAttestation::SourceExceedsTarget)], + cases: vec![ + Test::single(attestation_data_builder(1, 0)) + .expect_invalid_att(InvalidAttestation::SourceExceedsTarget), + ], ..StreamTest::default() } .run() diff --git 
a/validator_client/slashing_protection/src/bin/test_generator.rs b/validator_client/slashing_protection/src/bin/test_generator.rs index ff5866f9866..4576231b7bd 100644 --- a/validator_client/slashing_protection/src/bin/test_generator.rs +++ b/validator_client/slashing_protection/src/bin/test_generator.rs @@ -1,9 +1,9 @@ +use slashing_protection::SUPPORTED_INTERCHANGE_FORMAT_VERSION; use slashing_protection::interchange::{ Interchange, InterchangeData, InterchangeMetadata, SignedAttestation, SignedBlock, }; use slashing_protection::interchange_test::{MultiTestCase, TestCase}; -use slashing_protection::test_utils::{pubkey, DEFAULT_GENESIS_VALIDATORS_ROOT}; -use slashing_protection::SUPPORTED_INTERCHANGE_FORMAT_VERSION; +use slashing_protection::test_utils::{DEFAULT_GENESIS_VALIDATORS_ROOT, pubkey}; use std::fs::{self, File}; use std::io::Write; use std::path::Path; diff --git a/validator_client/slashing_protection/src/interchange_test.rs b/validator_client/slashing_protection/src/interchange_test.rs index e1ac841905f..1bc4326b4f6 100644 --- a/validator_client/slashing_protection/src/interchange_test.rs +++ b/validator_client/slashing_protection/src/interchange_test.rs @@ -1,7 +1,7 @@ use crate::{ - interchange::{Interchange, SignedAttestation, SignedBlock}, - test_utils::{pubkey, DEFAULT_GENESIS_VALIDATORS_ROOT}, SigningRoot, SlashingDatabase, + interchange::{Interchange, SignedAttestation, SignedBlock}, + test_utils::{DEFAULT_GENESIS_VALIDATORS_ROOT, pubkey}, }; use serde::{Deserialize, Serialize}; use std::collections::HashSet; @@ -270,9 +270,11 @@ pub fn check_minification_invariants(interchange: &Interchange, minified: &Inter assert_eq!(mini_block.signing_root, None); // All original blocks should have slots <= the mini block. 
- assert!(original_blocks - .iter() - .all(|block| block.slot <= mini_block.slot)); + assert!( + original_blocks + .iter() + .all(|block| block.slot <= mini_block.slot) + ); } // Minified data should contain 1 attestation per validator, unless the validator never @@ -289,10 +291,12 @@ pub fn check_minification_invariants(interchange: &Interchange, minified: &Inter let mini_attestation = minified_attestations.first().unwrap(); assert_eq!(mini_attestation.signing_root, None); - assert!(original_attestations - .iter() - .all(|att| att.source_epoch <= mini_attestation.source_epoch - && att.target_epoch <= mini_attestation.target_epoch)); + assert!( + original_attestations + .iter() + .all(|att| att.source_epoch <= mini_attestation.source_epoch + && att.target_epoch <= mini_attestation.target_epoch) + ); } } } diff --git a/validator_client/slashing_protection/src/lib.rs b/validator_client/slashing_protection/src/lib.rs index 825a34cabc7..ded64adb492 100644 --- a/validator_client/slashing_protection/src/lib.rs +++ b/validator_client/slashing_protection/src/lib.rs @@ -13,8 +13,8 @@ pub mod test_utils; pub use crate::signed_attestation::{InvalidAttestation, SignedAttestation}; pub use crate::signed_block::{InvalidBlock, SignedBlock}; pub use crate::slashing_database::{ - InterchangeError, InterchangeImportOutcome, SlashingDatabase, - SUPPORTED_INTERCHANGE_FORMAT_VERSION, + InterchangeError, InterchangeImportOutcome, SUPPORTED_INTERCHANGE_FORMAT_VERSION, + SlashingDatabase, }; use rusqlite::Error as SQLError; use std::fmt::Display; @@ -89,7 +89,7 @@ impl SigningRoot { /// Safely parse a `SigningRoot` from the given `column` of an SQLite `row`. 
fn signing_root_from_row(column: usize, row: &rusqlite::Row) -> rusqlite::Result { - use rusqlite::{types::Type, Error}; + use rusqlite::{Error, types::Type}; let bytes: Vec = row.get(column)?; if bytes.len() == 32 { diff --git a/validator_client/slashing_protection/src/signed_attestation.rs b/validator_client/slashing_protection/src/signed_attestation.rs index 332f80c7045..c897b540025 100644 --- a/validator_client/slashing_protection/src/signed_attestation.rs +++ b/validator_client/slashing_protection/src/signed_attestation.rs @@ -1,4 +1,4 @@ -use crate::{signing_root_from_row, SigningRoot}; +use crate::{SigningRoot, signing_root_from_row}; use types::{AttestationData, Epoch, Hash256, SignedRoot}; /// An attestation that has previously been signed. diff --git a/validator_client/slashing_protection/src/signed_block.rs b/validator_client/slashing_protection/src/signed_block.rs index d46872529e9..5918d2c61d1 100644 --- a/validator_client/slashing_protection/src/signed_block.rs +++ b/validator_client/slashing_protection/src/signed_block.rs @@ -1,4 +1,4 @@ -use crate::{signing_root_from_row, SigningRoot}; +use crate::{SigningRoot, signing_root_from_row}; use types::{BeaconBlockHeader, Hash256, SignedRoot, Slot}; /// A block that has previously been signed. 
diff --git a/validator_client/slashing_protection/src/slashing_database.rs b/validator_client/slashing_protection/src/slashing_database.rs index f4c844d3140..9cecdaa8a59 100644 --- a/validator_client/slashing_protection/src/slashing_database.rs +++ b/validator_client/slashing_protection/src/slashing_database.rs @@ -4,10 +4,10 @@ use crate::interchange::{ }; use crate::signed_attestation::InvalidAttestation; use crate::signed_block::InvalidBlock; -use crate::{signing_root_from_row, NotSafe, Safe, SignedAttestation, SignedBlock, SigningRoot}; +use crate::{NotSafe, Safe, SignedAttestation, SignedBlock, SigningRoot, signing_root_from_row}; use filesystem::restrict_file_permissions; use r2d2_sqlite::SqliteConnectionManager; -use rusqlite::{params, OptionalExtension, Transaction, TransactionBehavior}; +use rusqlite::{OptionalExtension, Transaction, TransactionBehavior, params}; use std::fs::File; use std::path::Path; use std::time::Duration; @@ -356,15 +356,15 @@ impl SlashingDatabase { .prepare("SELECT MIN(slot) FROM signed_blocks WHERE validator_id = ?1")? .query_row(params![validator_id], |row| row.get(0))?; - if let Some(min_slot) = min_slot { - if slot <= min_slot { - return Err(NotSafe::InvalidBlock( - InvalidBlock::SlotViolatesLowerBound { - block_slot: slot, - bound_slot: min_slot, - }, - )); - } + if let Some(min_slot) = min_slot + && slot <= min_slot + { + return Err(NotSafe::InvalidBlock( + InvalidBlock::SlotViolatesLowerBound { + block_slot: slot, + bound_slot: min_slot, + }, + )); } Ok(Safe::Valid) @@ -467,30 +467,30 @@ impl SlashingDatabase { .prepare("SELECT MIN(source_epoch) FROM signed_attestations WHERE validator_id = ?1")? 
.query_row(params![validator_id], |row| row.get(0))?; - if let Some(min_source) = min_source { - if att_source_epoch < min_source { - return Err(NotSafe::InvalidAttestation( - InvalidAttestation::SourceLessThanLowerBound { - source_epoch: att_source_epoch, - bound_epoch: min_source, - }, - )); - } + if let Some(min_source) = min_source + && att_source_epoch < min_source + { + return Err(NotSafe::InvalidAttestation( + InvalidAttestation::SourceLessThanLowerBound { + source_epoch: att_source_epoch, + bound_epoch: min_source, + }, + )); } let min_target = txn .prepare("SELECT MIN(target_epoch) FROM signed_attestations WHERE validator_id = ?1")? .query_row(params![validator_id], |row| row.get(0))?; - if let Some(min_target) = min_target { - if att_target_epoch <= min_target { - return Err(NotSafe::InvalidAttestation( - InvalidAttestation::TargetLessThanOrEqLowerBound { - target_epoch: att_target_epoch, - bound_epoch: min_target, - }, - )); - } + if let Some(min_target) = min_target + && att_target_epoch <= min_target + { + return Err(NotSafe::InvalidAttestation( + InvalidAttestation::TargetLessThanOrEqLowerBound { + target_epoch: att_target_epoch, + bound_epoch: min_target, + }, + )); } // Everything has been checked, return Valid @@ -1218,9 +1218,10 @@ mod tests { assert_eq!(db.conn_pool.max_size(), POOL_SIZE); assert_eq!(db.conn_pool.connection_timeout(), CONNECTION_TIMEOUT); let conn = db.conn_pool.get().unwrap(); - assert!(conn - .pragma_query_value(None, "foreign_keys", |row| { row.get::<_, bool>(0) }) - .unwrap()); + assert!( + conn.pragma_query_value(None, "foreign_keys", |row| { row.get::<_, bool>(0) }) + .unwrap() + ); assert_eq!( conn.pragma_query_value(None, "locking_mode", |row| { row.get::<_, String>(0) }) .unwrap() diff --git a/validator_client/slashing_protection/src/test_utils.rs b/validator_client/slashing_protection/src/test_utils.rs index 8cbca12a10b..39ede58bb27 100644 --- a/validator_client/slashing_protection/src/test_utils.rs +++ 
b/validator_client/slashing_protection/src/test_utils.rs @@ -1,6 +1,6 @@ use crate::*; -use tempfile::{tempdir, TempDir}; -use types::{test_utils::generate_deterministic_keypair, AttestationData, BeaconBlockHeader}; +use tempfile::{TempDir, tempdir}; +use types::{AttestationData, BeaconBlockHeader, test_utils::generate_deterministic_keypair}; pub const DEFAULT_VALIDATOR_INDEX: usize = 0; pub const DEFAULT_DOMAIN: Hash256 = Hash256::ZERO; @@ -135,10 +135,12 @@ fn roundtrip_database(dir: &TempDir, db: &SlashingDatabase, is_empty: bool) { .export_all_interchange_info(DEFAULT_GENESIS_VALIDATORS_ROOT) .unwrap(); - assert!(exported - .minify() - .unwrap() - .equiv(&reexported.minify().unwrap())); + assert!( + exported + .minify() + .unwrap() + .equiv(&reexported.minify().unwrap()) + ); assert_eq!(is_empty, exported.is_empty()); } diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index e1cce5c9da3..85e40ae6d37 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -1,8 +1,8 @@ use beacon_node_fallback::ApiTopic; use clap::builder::ArgPredicate; pub use clap::{FromArgMatches, Parser}; -use clap_utils::get_color_style; use clap_utils::FLAG_HEADER; +use clap_utils::get_color_style; use serde::{Deserialize, Serialize}; use std::path::PathBuf; use types::Address; diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 726aa96cf9d..04d69dc9dc1 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -1,11 +1,11 @@ use crate::cli::ValidatorClient; -use beacon_node_fallback::beacon_node_health::BeaconNodeSyncDistanceTiers; use beacon_node_fallback::ApiTopic; +use beacon_node_fallback::beacon_node_health::BeaconNodeSyncDistanceTiers; use clap::ArgMatches; use clap_utils::{flags::DISABLE_MALLOC_TUNING_FLAG, parse_required}; use directory::{ - get_network_dir, DEFAULT_HARDCODED_NETWORK, DEFAULT_ROOT_DIR, DEFAULT_SECRET_DIR, - DEFAULT_VALIDATOR_DIR, + DEFAULT_HARDCODED_NETWORK, 
DEFAULT_ROOT_DIR, DEFAULT_SECRET_DIR, DEFAULT_VALIDATOR_DIR, + get_network_dir, }; use eth2::types::Graffiti; use graffiti_file::GraffitiFile; @@ -102,8 +102,10 @@ impl Default for Config { let validator_dir = base_dir.join(DEFAULT_VALIDATOR_DIR); let secrets_dir = base_dir.join(DEFAULT_SECRET_DIR); - let beacon_nodes = vec![SensitiveUrl::parse(DEFAULT_BEACON_NODE) - .expect("beacon_nodes must always be a valid url.")]; + let beacon_nodes = vec![ + SensitiveUrl::parse(DEFAULT_BEACON_NODE) + .expect("beacon_nodes must always be a valid url."), + ]; Self { validator_store: ValidatorStoreConfig::default(), validator_dir, diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 73dcb793dc9..5b396ccaf53 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -7,16 +7,16 @@ use initialized_validators::InitializedValidators; use metrics::set_gauge; use monitoring_api::{MonitoringHttpClient, ProcessType}; use sensitive_url::SensitiveUrl; -use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; +use slashing_protection::{SLASHING_PROTECTION_FILENAME, SlashingDatabase}; use account_utils::validator_definitions::ValidatorDefinitions; use beacon_node_fallback::{ - start_fallback_updater_service, BeaconNodeFallback, CandidateBeaconNode, + BeaconNodeFallback, CandidateBeaconNode, start_fallback_updater_service, }; use clap::ArgMatches; use doppelganger_service::DoppelgangerService; use environment::RuntimeContext; -use eth2::{reqwest::ClientBuilder, BeaconNodeHttpClient, StatusCode, Timeouts}; +use eth2::{BeaconNodeHttpClient, StatusCode, Timeouts, reqwest::ClientBuilder}; use initialized_validators::Error::UnableToOpenVotingKeystore; use lighthouse_validator_store::LighthouseValidatorStore; use parking_lot::RwLock; @@ -31,7 +31,7 @@ use std::sync::Arc; use std::time::{SystemTime, UNIX_EPOCH}; use tokio::{ sync::mpsc, - time::{sleep, Duration}, + time::{Duration, sleep}, }; use tracing::{debug, error, info, warn}; use 
types::{EthSpec, Hash256}; diff --git a/validator_client/validator_services/src/attestation_service.rs b/validator_client/validator_services/src/attestation_service.rs index e4063cd2117..da6e8f35886 100644 --- a/validator_client/validator_services/src/attestation_service.rs +++ b/validator_client/validator_services/src/attestation_service.rs @@ -7,7 +7,7 @@ use std::collections::HashMap; use std::ops::Deref; use std::sync::Arc; use task_executor::TaskExecutor; -use tokio::time::{sleep, sleep_until, Duration, Instant}; +use tokio::time::{Duration, Instant, sleep, sleep_until}; use tracing::{debug, error, info, trace, warn}; use tree_hash::TreeHash; use types::{Attestation, AttestationData, ChainSpec, CommitteeIndex, EthSpec, Slot}; diff --git a/validator_client/validator_services/src/block_service.rs b/validator_client/validator_services/src/block_service.rs index 4ff8b15bed2..834df67e8aa 100644 --- a/validator_client/validator_services/src/block_service.rs +++ b/validator_client/validator_services/src/block_service.rs @@ -1,7 +1,7 @@ use beacon_node_fallback::{ApiTopic, BeaconNodeFallback, Error as FallbackError, Errors}; use bls::SignatureBytes; use eth2::{BeaconNodeHttpClient, StatusCode}; -use graffiti_file::{determine_graffiti, GraffitiFile}; +use graffiti_file::{GraffitiFile, determine_graffiti}; use logging::crit; use slot_clock::SlotClock; use std::fmt::Debug; @@ -148,14 +148,13 @@ impl ProposerFallback { Err: Debug, { // If there are proposer nodes, try calling `func` on them and return early if they are successful. - if let Some(proposer_nodes) = &self.proposer_nodes { - if proposer_nodes + if let Some(proposer_nodes) = &self.proposer_nodes + && proposer_nodes .request(ApiTopic::Blocks, func.clone()) .await .is_ok() - { - return Ok(()); - } + { + return Ok(()); } // If the proposer nodes failed, try on the non-proposer nodes. 
@@ -353,7 +352,7 @@ impl BlockService { return Err(BlockError::Recoverable(format!( "Unable to sign block: {:?}", e - ))) + ))); } }; @@ -422,7 +421,7 @@ impl BlockService { return Err(BlockError::Recoverable(format!( "Unable to produce randao reveal signature: {:?}", e - ))) + ))); } }; diff --git a/validator_client/validator_services/src/duties_service.rs b/validator_client/validator_services/src/duties_service.rs index b4d9bae2732..009537bc439 100644 --- a/validator_client/validator_services/src/duties_service.rs +++ b/validator_client/validator_services/src/duties_service.rs @@ -7,26 +7,26 @@ //! block production. use crate::block_service::BlockServiceNotification; -use crate::sync::poll_sync_committee_duties; use crate::sync::SyncDutiesMap; +use crate::sync::poll_sync_committee_duties; use beacon_node_fallback::{ApiTopic, BeaconNodeFallback}; use eth2::types::{ AttesterData, BeaconCommitteeSubscription, DutiesResponse, ProposerData, StateId, ValidatorId, }; -use futures::{stream, StreamExt}; +use futures::{StreamExt, stream}; use parking_lot::RwLock; use safe_arith::{ArithError, SafeArith}; use slot_clock::SlotClock; use std::cmp::min; -use std::collections::{hash_map, BTreeMap, HashMap, HashSet}; -use std::sync::atomic::{AtomicBool, Ordering}; +use std::collections::{BTreeMap, HashMap, HashSet, hash_map}; use std::sync::Arc; +use std::sync::atomic::{AtomicBool, Ordering}; use std::time::Duration; use task_executor::TaskExecutor; use tokio::{sync::mpsc::Sender, time::sleep}; use tracing::{debug, error, info, warn}; use types::{ChainSpec, Epoch, EthSpec, Hash256, PublicKeyBytes, SelectionProof, Slot}; -use validator_metrics::{get_int_gauge, set_int_gauge, ATTESTATION_DUTY}; +use validator_metrics::{ATTESTATION_DUTY, get_int_gauge, set_int_gauge}; use validator_store::{DoppelgangerStatus, Error as ValidatorStoreError, ValidatorStore}; /// Only retain `HISTORICAL_DUTIES_EPOCHS` duties prior to the current epoch. 
@@ -1362,15 +1362,14 @@ async fn poll_beacon_proposers( .proposers .write() .insert(current_epoch, (dependent_root, relevant_duties)) + && dependent_root != prior_dependent_root { - if dependent_root != prior_dependent_root { - warn!( - %prior_dependent_root, - %dependent_root, - msg = "this may happen from time to time", - "Proposer duties re-org" - ) - } + warn!( + %prior_dependent_root, + %dependent_root, + msg = "this may happen from time to time", + "Proposer duties re-org" + ) } } // Don't return early here, we still want to try and produce blocks using the cached values. @@ -1433,21 +1432,20 @@ async fn notify_block_production_service( .copied() .collect::>(); - if !non_doppelganger_proposers.is_empty() { - if let Err(e) = block_service_tx + if !non_doppelganger_proposers.is_empty() + && let Err(e) = block_service_tx .send(BlockServiceNotification { slot: current_slot, block_proposers: non_doppelganger_proposers, }) .await - { - error!( - %current_slot, - error = %e, - "Failed to notify block service" - ); - }; - } + { + error!( + %current_slot, + error = %e, + "Failed to notify block service" + ); + }; } #[cfg(test)] diff --git a/validator_client/validator_services/src/notifier_service.rs b/validator_client/validator_services/src/notifier_service.rs index 55a583774ee..9c5f019c7a6 100644 --- a/validator_client/validator_services/src/notifier_service.rs +++ b/validator_client/validator_services/src/notifier_service.rs @@ -2,7 +2,7 @@ use crate::duties_service::DutiesService; use slot_clock::SlotClock; use std::sync::Arc; use task_executor::TaskExecutor; -use tokio::time::{sleep, Duration}; +use tokio::time::{Duration, sleep}; use tracing::{debug, error, info}; use types::{ChainSpec, EthSpec}; use validator_metrics::set_gauge; diff --git a/validator_client/validator_services/src/preparation_service.rs b/validator_client/validator_services/src/preparation_service.rs index b59e3266dc9..063b11512f9 100644 --- 
a/validator_client/validator_services/src/preparation_service.rs +++ b/validator_client/validator_services/src/preparation_service.rs @@ -8,7 +8,7 @@ use std::ops::Deref; use std::sync::Arc; use std::time::{SystemTime, UNIX_EPOCH}; use task_executor::TaskExecutor; -use tokio::time::{sleep, Duration}; +use tokio::time::{Duration, sleep}; use tracing::{debug, error, info, warn}; use types::{ Address, ChainSpec, EthSpec, ProposerPreparationData, SignedValidatorRegistrationData, diff --git a/validator_client/validator_services/src/sync_committee_service.rs b/validator_client/validator_services/src/sync_committee_service.rs index be9e2918a4b..02f9f24c8a1 100644 --- a/validator_client/validator_services/src/sync_committee_service.rs +++ b/validator_client/validator_services/src/sync_committee_service.rs @@ -1,16 +1,16 @@ use crate::duties_service::DutiesService; use beacon_node_fallback::{ApiTopic, BeaconNodeFallback}; use eth2::types::BlockId; -use futures::future::join_all; use futures::future::FutureExt; +use futures::future::join_all; use logging::crit; use slot_clock::SlotClock; use std::collections::HashMap; use std::ops::Deref; -use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicBool, Ordering}; use task_executor::TaskExecutor; -use tokio::time::{sleep, sleep_until, Duration, Instant}; +use tokio::time::{Duration, Instant, sleep, sleep_until}; use tracing::{debug, error, info, trace, warn}; use types::{ ChainSpec, EthSpec, Hash256, PublicKeyBytes, Slot, SyncCommitteeSubscription, diff --git a/validator_manager/src/common.rs b/validator_manager/src/common.rs index 715f1068f02..0e93b257734 100644 --- a/validator_manager/src/common.rs +++ b/validator_manager/src/common.rs @@ -1,13 +1,13 @@ -use account_utils::strip_off_newlines; pub use account_utils::STDIN_INPUTS_FLAG; +use account_utils::strip_off_newlines; use eth2::lighthouse_vc::std_types::{InterchangeJsonStr, KeystoreJsonStr}; use eth2::{ + SensitiveUrl, 
lighthouse_vc::{ http_client::ValidatorClientHttpClient, std_types::{ImportKeystoreStatus, ImportKeystoresRequest, SingleKeystoreResponse, Status}, types::UpdateFeeRecipientRequest, }, - SensitiveUrl, }; use serde::{Deserialize, Serialize}; use std::fs; diff --git a/validator_manager/src/create_validators.rs b/validator_manager/src/create_validators.rs index 3216417c736..19f78be2ea7 100644 --- a/validator_manager/src/create_validators.rs +++ b/validator_manager/src/create_validators.rs @@ -4,9 +4,9 @@ use account_utils::{random_password_string, read_mnemonic_from_cli, read_passwor use clap::{Arg, ArgAction, ArgMatches, Command}; use clap_utils::FLAG_HEADER; use eth2::{ + BeaconNodeHttpClient, SensitiveUrl, Timeouts, lighthouse_vc::std_types::KeystoreJsonStr, types::{StateId, ValidatorId}, - BeaconNodeHttpClient, SensitiveUrl, Timeouts, }; use eth2_wallet::WalletBuilder; use serde::{Deserialize, Serialize}; @@ -439,17 +439,16 @@ impl ValidatorsAndDeposits { different validator clients. If you understand the risks and are certain you \ wish to generate this validator again, omit the --{} flag.", voting_public_key, derivation_index, BEACON_NODE_FLAG - ))? 
+ ))?; + } + Ok(None) => { + eprintln!("{:?} was not found in the beacon chain", voting_public_key) } - Ok(None) => eprintln!( - "{:?} was not found in the beacon chain", - voting_public_key - ), Err(e) => { return Err(format!( "Error checking if validator exists in beacon chain: {:?}", e - )) + )); } } } @@ -591,7 +590,7 @@ pub mod tests { use regex::Regex; use std::path::Path; use std::str::FromStr; - use tempfile::{tempdir, TempDir}; + use tempfile::{TempDir, tempdir}; use tree_hash::TreeHash; type E = MainnetEthSpec; diff --git a/validator_manager/src/delete_validators.rs b/validator_manager/src/delete_validators.rs index cb0557427ca..3ff0c9529d7 100644 --- a/validator_manager/src/delete_validators.rs +++ b/validator_manager/src/delete_validators.rs @@ -1,13 +1,13 @@ use clap::{Arg, ArgAction, ArgMatches, Command}; use eth2::{ - lighthouse_vc::types::{DeleteKeystoreStatus, DeleteKeystoresRequest}, SensitiveUrl, + lighthouse_vc::types::{DeleteKeystoreStatus, DeleteKeystoresRequest}, }; use serde::{Deserialize, Serialize}; use std::path::PathBuf; use types::PublicKeyBytes; -use crate::{common::vc_http_client, DumpConfig}; +use crate::{DumpConfig, common::vc_http_client}; pub const CMD: &str = "delete"; pub const VC_URL_FLAG: &str = "vc-url"; @@ -160,7 +160,7 @@ mod test { use crate::{ common::ValidatorSpecification, import_validators::tests::TestBuilder as ImportTestBuilder, }; - use validator_http_api::{test_utils::ApiTester, Config as HttpConfig}; + use validator_http_api::{Config as HttpConfig, test_utils::ApiTester}; struct TestBuilder { delete_config: Option, diff --git a/validator_manager/src/exit_validators.rs b/validator_manager/src/exit_validators.rs index 30d8c5c47db..a6bbf05fb4a 100644 --- a/validator_manager/src/exit_validators.rs +++ b/validator_manager/src/exit_validators.rs @@ -1,4 +1,4 @@ -use crate::{common::vc_http_client, DumpConfig}; +use crate::{DumpConfig, common::vc_http_client}; use clap::{Arg, ArgAction, ArgMatches, Command}; use 
clap_utils::FLAG_HEADER; @@ -302,7 +302,7 @@ mod test { sync::Arc, }; use types::{ChainSpec, MainnetEthSpec}; - use validator_http_api::{test_utils::ApiTester, Config as HttpConfig}; + use validator_http_api::{Config as HttpConfig, test_utils::ApiTester}; use zeroize::Zeroizing; type E = MainnetEthSpec; @@ -533,13 +533,17 @@ mod test { let beacon_exit_epoch = current_epoch + 1 + max_seed_lookahead; let beacon_withdrawable_epoch = beacon_exit_epoch + min_withdrawability_delay; - assert!(validator_exit_epoch - .iter() - .all(|&epoch| epoch == beacon_exit_epoch)); + assert!( + validator_exit_epoch + .iter() + .all(|&epoch| epoch == beacon_exit_epoch) + ); - assert!(validator_withdrawable_epoch - .iter() - .all(|&epoch| epoch == beacon_withdrawable_epoch)); + assert!( + validator_withdrawable_epoch + .iter() + .all(|&epoch| epoch == beacon_withdrawable_epoch) + ); if result.is_ok() { return TestResult { result: Ok(()) }; diff --git a/validator_manager/src/import_validators.rs b/validator_manager/src/import_validators.rs index e5047f3f37e..5f5f049ed97 100644 --- a/validator_manager/src/import_validators.rs +++ b/validator_manager/src/import_validators.rs @@ -5,7 +5,7 @@ use clap::{Arg, ArgAction, ArgMatches, Command}; use clap_utils::FLAG_HEADER; use derivative::Derivative; use eth2::lighthouse_vc::types::KeystoreJsonStr; -use eth2::{lighthouse_vc::std_types::ImportKeystoreStatus, SensitiveUrl}; +use eth2::{SensitiveUrl, lighthouse_vc::std_types::ImportKeystoreStatus}; use serde::{Deserialize, Serialize}; use std::fs; use std::path::PathBuf; @@ -279,38 +279,38 @@ async fn run(config: ImportConfig) -> Result<(), String> { for (i, validator) in validators.into_iter().enumerate() { match validator.upload(&http_client, ignore_duplicates).await { - Ok(status) => { - match status.status { - ImportKeystoreStatus::Imported => { - eprintln!("Uploaded keystore {} of {} to the VC", i + 1, count) - } - ImportKeystoreStatus::Duplicate => { - if ignore_duplicates { - 
eprintln!("Re-uploaded keystore {} of {} to the VC", i + 1, count) - } else { - eprintln!( - "Keystore {} of {} was uploaded to the VC, but it was a duplicate. \ - Exiting now, use --{} to allow duplicates.", - i + 1, count, IGNORE_DUPLICATES_FLAG - ); - return Err(DETECTED_DUPLICATE_MESSAGE.to_string()); - } - } - ImportKeystoreStatus::Error => { + Ok(status) => match status.status { + ImportKeystoreStatus::Imported => { + eprintln!("Uploaded keystore {} of {} to the VC", i + 1, count) + } + ImportKeystoreStatus::Duplicate => { + if ignore_duplicates { + eprintln!("Re-uploaded keystore {} of {} to the VC", i + 1, count) + } else { eprintln!( - "Upload of keystore {} of {} failed with message: {:?}. \ - A potential solution is run this command again \ - using the --{} flag, however care should be taken to ensure \ - that there are no duplicate deposits submitted.", + "Keystore {} of {} was uploaded to the VC, but it was a duplicate. \ + Exiting now, use --{} to allow duplicates.", i + 1, count, - status.message, IGNORE_DUPLICATES_FLAG ); - return Err(format!("Upload failed with {:?}", status.message)); + return Err(DETECTED_DUPLICATE_MESSAGE.to_string()); } } - } + ImportKeystoreStatus::Error => { + eprintln!( + "Upload of keystore {} of {} failed with message: {:?}. 
\ + A potential solution is run this command again \ + using the --{} flag, however care should be taken to ensure \ + that there are no duplicate deposits submitted.", + i + 1, + count, + status.message, + IGNORE_DUPLICATES_FLAG + ); + return Err(format!("Upload failed with {:?}", status.message)); + } + }, e @ Err(UploadError::InvalidPublicKey) => { eprintln!("Validator {} has an invalid public key", i); return Err(format!("{:?}", e)); @@ -384,8 +384,8 @@ pub mod tests { use super::*; use crate::create_validators::tests::TestBuilder as CreateTestBuilder; use std::fs::{self, File}; - use tempfile::{tempdir, TempDir}; - use validator_http_api::{test_utils::ApiTester, Config as HttpConfig}; + use tempfile::{TempDir, tempdir}; + use validator_http_api::{Config as HttpConfig, test_utils::ApiTester}; const VC_TOKEN_FILE_NAME: &str = "vc_token.json"; diff --git a/validator_manager/src/list_validators.rs b/validator_manager/src/list_validators.rs index 9411e13251d..b064982adf4 100644 --- a/validator_manager/src/list_validators.rs +++ b/validator_manager/src/list_validators.rs @@ -8,7 +8,7 @@ use std::time::Duration; use types::{ChainSpec, EthSpec, PublicKeyBytes}; use crate::exit_validators::get_current_epoch; -use crate::{common::vc_http_client, DumpConfig}; +use crate::{DumpConfig, common::vc_http_client}; pub const CMD: &str = "list"; pub const VC_URL_FLAG: &str = "vc-url"; @@ -224,7 +224,7 @@ mod test { common::ValidatorSpecification, import_validators::tests::TestBuilder as ImportTestBuilder, }; use types::MainnetEthSpec; - use validator_http_api::{test_utils::ApiTester, Config as HttpConfig}; + use validator_http_api::{Config as HttpConfig, test_utils::ApiTester}; type E = MainnetEthSpec; struct TestBuilder { diff --git a/validator_manager/src/move_validators.rs b/validator_manager/src/move_validators.rs index abac0716738..08b50eb9293 100644 --- a/validator_manager/src/move_validators.rs +++ b/validator_manager/src/move_validators.rs @@ -3,6 +3,7 @@ use 
crate::DumpConfig; use account_utils::read_password_from_user; use clap::{Arg, ArgAction, ArgMatches, Command}; use eth2::{ + SensitiveUrl, lighthouse_vc::{ std_types::{ DeleteKeystoreStatus, DeleteKeystoresRequest, ImportKeystoreStatus, InterchangeJsonStr, @@ -10,7 +11,6 @@ use eth2::{ }, types::{ExportKeystoresResponse, SingleExportKeystoresResponse}, }, - SensitiveUrl, }; use serde::{Deserialize, Serialize}; use std::collections::{HashMap, HashSet}; @@ -458,8 +458,7 @@ async fn run(config: MoveConfig) -> Result<(), String> { Err(e) => { eprintln!( "Retrying after error: {:?}. If this error persists the user will need to \ - manually recover their keystore for validator {:?} from the mnemonic." - , + manually recover their keystore for validator {:?} from the mnemonic.", e, pubkey_to_move ); } @@ -668,8 +667,8 @@ mod test { use crate::import_validators::tests::TestBuilder as ImportTestBuilder; use account_utils::validator_definitions::SigningDefinition; use std::fs; - use tempfile::{tempdir, TempDir}; - use validator_http_api::{test_utils::ApiTester, Config as HttpConfig}; + use tempfile::{TempDir, tempdir}; + use validator_http_api::{Config as HttpConfig, test_utils::ApiTester}; const SRC_VC_TOKEN_FILE_NAME: &str = "src_vc_token.json"; const DEST_VC_TOKEN_FILE_NAME: &str = "dest_vc_token.json"; @@ -901,13 +900,13 @@ mod test { ); if self.reuse_password_files.is_some() { assert!( - src_vc - .secrets_dir - .path() - .join(format!("{:?}", pubkey)) - .exists(), - "the source password file was used by another validator and should not be deleted" - ) + src_vc + .secrets_dir + .path() + .join(format!("{:?}", pubkey)) + .exists(), + "the source password file was used by another validator and should not be deleted" + ) } else { assert!( !src_vc