diff --git a/Cargo.toml b/Cargo.toml index 384058422..83e19b578 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,7 @@ [workspace.package] version = "0.2.2" edition = "2024" -rust-version = "1.86" +rust-version = "1.88" license = "MIT OR Apache-2.0" homepage = "https://github.com/flashbots/op-rbuilder" repository = "https://github.com/flashbots/op-rbuilder" diff --git a/crates/op-rbuilder/build.rs b/crates/op-rbuilder/build.rs index 97acc1fef..66ebf928b 100644 --- a/crates/op-rbuilder/build.rs +++ b/crates/op-rbuilder/build.rs @@ -32,10 +32,7 @@ fn main() -> Result<(), Box> { emitter.add_instructions(&build_builder)?; - let cargo_builder = CargoBuilder::default() - .features(true) - .target_triple(true) - .build()?; + let cargo_builder = CargoBuilder::default().features(true).target_triple(true).build()?; emitter.add_instructions(&cargo_builder)?; diff --git a/crates/op-rbuilder/src/args/mod.rs b/crates/op-rbuilder/src/args/mod.rs index 4188e39cc..8eb655e13 100644 --- a/crates/op-rbuilder/src/args/mod.rs +++ b/crates/op-rbuilder/src/args/mod.rs @@ -11,12 +11,11 @@ mod op; mod playground; /// This trait is used to extend Reth's CLI with additional functionality that -/// are specific to the OP builder, such as populating default values for CLI arguments -/// when running in the playground mode or checking the builder mode. -/// +/// are specific to the OP builder, such as populating default values for CLI +/// arguments when running in the playground mode or checking the builder mode. pub trait CliExt { - /// Populates the default values for the CLI arguments when the user specifies - /// the `--builder.playground` flag. + /// Populates the default values for the CLI arguments when the user + /// specifies the `--builder.playground` flag. fn populate_defaults(self) -> Self; /// Returns the builder mode that the node is started with. 
@@ -38,13 +37,15 @@ impl CliExt for Cli { /// and if so, populates the default values for the CLI arguments from the /// playground configuration. /// - /// The `--builder.playground` flag is used to populate the CLI arguments with - /// default values for running the builder against the playground environment. + /// The `--builder.playground` flag is used to populate the CLI arguments + /// with default values for running the builder against the playground + /// environment. /// /// The values are populated from the default directory of the playground /// configuration, which is `$HOME/.playground/devnet/` by default. /// - /// Any manually specified CLI arguments by the user will override the defaults. + /// Any manually specified CLI arguments by the user will override the + /// defaults. fn populate_defaults(self) -> Self { let Commands::Node(ref node_command) = self.command else { // playground defaults are only relevant if running the node commands. @@ -65,13 +66,13 @@ impl CliExt for Cli { Cli::set_version().populate_defaults() } - /// Returns the type of builder implementation that the node is started with. - /// Currently supports `Standard` and `Flashblocks` modes. + /// Returns the type of builder implementation that the node is started + /// with. Currently supports `Standard` and `Flashblocks` modes. 
fn builder_mode(&self) -> BuilderMode { - if let Commands::Node(ref node_command) = self.command { - if node_command.ext.flashblocks.enabled { - return BuilderMode::Flashblocks; - } + if let Commands::Node(ref node_command) = self.command && + node_command.ext.flashblocks.enabled + { + return BuilderMode::Flashblocks; } BuilderMode::Standard } diff --git a/crates/op-rbuilder/src/args/op.rs b/crates/op-rbuilder/src/args/op.rs index 760249b5a..eb3579a00 100644 --- a/crates/op-rbuilder/src/args/op.rs +++ b/crates/op-rbuilder/src/args/op.rs @@ -26,11 +26,7 @@ pub struct OpRbuilderArgs { pub builder_signer: Option, /// chain block time in milliseconds - #[arg( - long = "rollup.chain-block-time", - default_value = "1000", - env = "CHAIN_BLOCK_TIME" - )] + #[arg(long = "rollup.chain-block-time", default_value = "1000", env = "CHAIN_BLOCK_TIME")] pub chain_block_time: u64, /// max gas a transaction can use @@ -41,14 +37,16 @@ pub struct OpRbuilderArgs { #[arg(long = "builder.log-pool-transactions", default_value = "false")] pub log_pool_transactions: bool, - /// How much time extra to wait for the block building job to complete and not get garbage collected + /// How much time extra to wait for the block building job to complete and + /// not get garbage collected #[arg(long = "builder.extra-block-deadline-secs", default_value = "20")] pub extra_block_deadline_secs: u64, /// Whether to enable revert protection by default #[arg(long = "builder.enable-revert-protection", default_value = "false")] pub enable_revert_protection: bool, - /// Path to builder playgorund to automatically start up the node connected to it + /// Path to builder playground to automatically start up the node connected + /// to it #[arg( long = "builder.playground", num_args = 0..=1, @@ -70,9 +68,7 @@ pub struct OpRbuilderArgs { impl Default for OpRbuilderArgs { fn default() -> Self { let args = crate::args::Cli::parse_from(["dummy", "node"]); - let Commands::Node(node_command) = args.command else { - 
unreachable!() - }; + let Commands::Node(node_command) = args.command else { unreachable!() }; node_command.ext } } @@ -87,8 +83,9 @@ fn expand_path(s: &str) -> Result { /// Parameters for Flashblocks configuration /// The names in the struct are prefixed with `flashblocks` to avoid conflicts -/// with the standard block building configuration since these args are flattened -/// into the main `OpRbuilderArgs` struct with the other rollup/node args. +/// with the standard block building configuration since these args are +/// flattened into the main `OpRbuilderArgs` struct with the other rollup/node +/// args. #[derive(Debug, Clone, PartialEq, Eq, clap::Args)] pub struct FlashblocksArgs { /// When set to true, the builder will build flashblocks @@ -96,56 +93,34 @@ pub struct FlashblocksArgs { /// /// The default value will change in the future once the flashblocks /// feature is stable. - #[arg( - long = "flashblocks.enabled", - default_value = "false", - env = "ENABLE_FLASHBLOCKS" - )] + #[arg(long = "flashblocks.enabled", default_value = "false", env = "ENABLE_FLASHBLOCKS")] pub enabled: bool, - /// The port that we bind to for the websocket server that provides flashblocks - #[arg( - long = "flashblocks.port", - env = "FLASHBLOCKS_WS_PORT", - default_value = "1111" - )] + /// The port that we bind to for the websocket server that provides + /// flashblocks + #[arg(long = "flashblocks.port", env = "FLASHBLOCKS_WS_PORT", default_value = "1111")] pub flashblocks_port: u16, - /// The address that we bind to for the websocket server that provides flashblocks - #[arg( - long = "flashblocks.addr", - env = "FLASHBLOCKS_WS_ADDR", - default_value = "127.0.0.1" - )] + /// The address that we bind to for the websocket server that provides + /// flashblocks + #[arg(long = "flashblocks.addr", env = "FLASHBLOCKS_WS_ADDR", default_value = "127.0.0.1")] pub flashblocks_addr: String, /// flashblock block time in milliseconds - #[arg( - long = "flashblocks.block-time", - 
default_value = "250", - env = "FLASHBLOCK_BLOCK_TIME" - )] + #[arg(long = "flashblocks.block-time", default_value = "250", env = "FLASHBLOCK_BLOCK_TIME")] pub flashblocks_block_time: u64, - /// Builder would always thry to produce fixed number of flashblocks without regard to time of - FCU arrival. + /// Builder would always try to produce fixed number of flashblocks without + /// regard to time of FCU arrival. /// In cases of late FCU it could lead to partially filled blocks. - #[arg( - long = "flashblocks.fixed", - default_value = "false", - env = "FLASHBLOCK_FIXED" - )] + #[arg(long = "flashblocks.fixed", default_value = "false", env = "FLASHBLOCK_FIXED")] pub flashblocks_fixed: bool, /// Time by which blocks would be completed earlier in milliseconds. /// - /// This time used to account for latencies, this time would be deducted from total block - /// building time before calculating number of fbs. - #[arg( - long = "flashblocks.leeway-time", - default_value = "75", - env = "FLASHBLOCK_LEEWAY_TIME" - )] + /// This time is used to account for latencies, this time would be deducted + /// from total block building time before calculating number of fbs. + #[arg(long = "flashblocks.leeway-time", default_value = "75", env = "FLASHBLOCK_LEEWAY_TIME")] pub flashblocks_leeway_time: u64, /// Should we calculate state root for each flashblock @@ -160,9 +135,7 @@ pub struct FlashblocksArgs { impl Default for FlashblocksArgs { fn default() -> Self { let args = crate::args::Cli::parse_from(["dummy", "node"]); - let Commands::Node(node_command) = args.command else { - unreachable!() - }; + let Commands::Node(node_command) = args.command else { unreachable!() }; node_command.ext.flashblocks } } @@ -178,11 +151,10 @@ pub struct TelemetryArgs { #[arg(long = "telemetry.otlp-headers", env = "OTEL_EXPORTER_OTLP_HEADERS")] pub otlp_headers: Option, - /// Inverted sampling frequency in blocks. 1 - each block, 100 - every 100th block. 
- #[arg( - long = "telemetry.sampling-ratio", - env = "SAMPLING_RATIO", - default_value = "100" - )] + /// Inverted sampling frequency in blocks. + /// + /// 1 - each block + /// 100 - every 100th block. + #[arg(long = "telemetry.sampling-ratio", env = "SAMPLING_RATIO", default_value = "100")] pub sampling_ratio: u64, } diff --git a/crates/op-rbuilder/src/args/playground.rs b/crates/op-rbuilder/src/args/playground.rs index 3ef24c1d0..3a5818623 100644 --- a/crates/op-rbuilder/src/args/playground.rs +++ b/crates/op-rbuilder/src/args/playground.rs @@ -80,10 +80,7 @@ impl PlaygroundOptions { /// Creates a new `PlaygroundOptions` instance with the specified genesis path. pub(super) fn new(path: &Path) -> Result { if !path.exists() { - return Err(eyre!( - "Playground data directory {} does not exist", - path.display() - )); + return Err(eyre!("Playground data directory {} does not exist", path.display())); } let chain = OpChainSpecParser::parse(&existing_path(path, "l2-genesis.json")?)?; @@ -127,9 +124,8 @@ impl PlaygroundOptions { // either via the command line or an environment variable. Otherwise, don't // override the user provided values. let matches = Cli::command().get_matches(); - let matches = matches - .subcommand_matches("node") - .expect("validated that we are in the node command"); + let matches = + matches.subcommand_matches("node").expect("validated that we are in the node command"); if matches.value_source("chain").is_default() { node.chain = self.chain; @@ -223,9 +219,7 @@ fn extract_chain_block_time(basepath: &Path) -> Result { fn extract_deterministic_p2p_key(basepath: &Path) -> Result { let key = read_to_string(existing_path(basepath, "enode-key-1.txt")?)?; - Ok(SecretKey::from_slice( - &hex::decode(key).map_err(|e| eyre!("Invalid hex key: {e}"))?, - )?) + Ok(SecretKey::from_slice(&hex::decode(key).map_err(|e| eyre!("Invalid hex key: {e}"))?)?) 
} fn read_docker_compose(basepath: &Path) -> Result { @@ -238,9 +232,7 @@ fn extract_service_command_flag(basepath: &Path, service: &str, flag: &str) -> R let docker_compose = read_docker_compose(basepath)?; let args = docker_compose["services"][service]["command"] .as_sequence() - .ok_or(eyre!( - "docker-compose.yaml is missing command line arguments for {service}" - ))? + .ok_or(eyre!("docker-compose.yaml is missing command line arguments for {service}"))? .iter() .map(|s| { s.as_str().ok_or_else(|| { @@ -254,9 +246,8 @@ fn extract_service_command_flag(basepath: &Path, service: &str, flag: &str) -> R .position(|arg| *arg == flag) .ok_or_else(|| eyre!("docker_compose: {flag} not found on {service} service"))?; - let value = args - .get(index + 1) - .ok_or_else(|| eyre!("docker_compose: {flag} value not found"))?; + let value = + args.get(index + 1).ok_or_else(|| eyre!("docker_compose: {flag} value not found"))?; Ok(value.to_string()) } @@ -274,9 +265,7 @@ fn extract_trusted_peer_port(basepath: &Path) -> Result { // command line arguments used to start the op-geth service let Some(opgeth_args) = docker_compose["services"]["op-geth"]["command"][1].as_str() else { - return Err(eyre!( - "docker-compose.yaml is missing command line arguments for op-geth" - )); + return Err(eyre!("docker-compose.yaml is missing command line arguments for op-geth")); }; let opgeth_args = opgeth_args.split_whitespace().collect::>(); @@ -289,16 +278,12 @@ fn extract_trusted_peer_port(basepath: &Path) -> Result { .get(port_param_position + 1) .ok_or_else(|| eyre!("docker_compose: --port value not found"))?; - let port_value = port_value - .parse::() - .map_err(|e| eyre!("Invalid port value: {e}"))?; + let port_value = port_value.parse::().map_err(|e| eyre!("Invalid port value: {e}"))?; // now we need to find the external port of the op-geth service from the docker-compose.yaml // ports mapping used to start the op-geth service let Some(opgeth_ports) = 
docker_compose["services"]["op-geth"]["ports"].as_sequence() else { - return Err(eyre!( - "docker-compose.yaml is missing ports mapping for op-geth" - )); + return Err(eyre!("docker-compose.yaml is missing ports mapping for op-geth")); }; let ports_mapping = opgeth_ports .iter() diff --git a/crates/op-rbuilder/src/bin/tester/main.rs b/crates/op-rbuilder/src/bin/tester/main.rs index 9345a8c02..654d2049e 100644 --- a/crates/op-rbuilder/src/bin/tester/main.rs +++ b/crates/op-rbuilder/src/bin/tester/main.rs @@ -74,9 +74,7 @@ pub async fn run_system(validation: bool) -> eyre::Result<()> { let mut driver = ChainDriver::::remote(provider, engine_api); if validation { - driver = driver - .with_validation_node(ExternalNode::reth().await?) - .await?; + driver = driver.with_validation_node(ExternalNode::reth().await?).await?; } // Infinite loop generating blocks diff --git a/crates/op-rbuilder/src/builders/builder_tx.rs b/crates/op-rbuilder/src/builders/builder_tx.rs index 8ff168b14..0cddd92f4 100644 --- a/crates/op-rbuilder/src/builders/builder_tx.rs +++ b/crates/op-rbuilder/src/builders/builder_tx.rs @@ -85,9 +85,8 @@ pub trait BuilderTransactions: Debug { db: &mut State, ) -> Result, BuilderTransactionError> { { - let mut evm = builder_ctx - .evm_config - .evm_with_env(&mut *db, builder_ctx.evm_env.clone()); + let mut evm = + builder_ctx.evm_config.evm_with_env(&mut *db, builder_ctx.evm_env.clone()); let mut invalid: HashSet
= HashSet::new(); @@ -109,7 +108,8 @@ pub trait BuilderTransactions: Debug { continue; } - // Add gas used by the transaction to cumulative gas used, before creating the receipt + // Add gas used by the transaction to cumulative gas used, before creating the + // receipt let gas_used = result.gas_used(); info.cumulative_gas_used += gas_used; @@ -127,8 +127,7 @@ pub trait BuilderTransactions: Debug { // Append sender and transaction to the respective lists info.executed_senders.push(builder_tx.signed_tx.signer()); - info.executed_transactions - .push(builder_tx.signed_tx.clone().into_inner()); + info.executed_transactions.push(builder_tx.signed_tx.clone().into_inner()); } // Release the db reference by dropping evm @@ -151,9 +150,7 @@ pub trait BuilderTransactions: Debug { .with_bundle_prestate(db.bundle_state.clone()) .with_bundle_update() .build(); - let mut evm = ctx - .evm_config - .evm_with_env(&mut simulation_state, ctx.evm_env.clone()); + let mut evm = ctx.evm_config.evm_with_env(&mut simulation_state, ctx.evm_env.clone()); for builder_tx in builder_txs { let ResultAndState { state, .. } = evm diff --git a/crates/op-rbuilder/src/builders/context.rs b/crates/op-rbuilder/src/builders/context.rs index 9604ced71..65e94d6f9 100644 --- a/crates/op-rbuilder/src/builders/context.rs +++ b/crates/op-rbuilder/src/builders/context.rs @@ -93,9 +93,7 @@ impl OpPayloadBuilderCtx { /// Returns the block gas limit to target. pub(super) fn block_gas_limit(&self) -> u64 { - self.attributes() - .gas_limit - .unwrap_or(self.evm_env.block_env.gas_limit) + self.attributes().gas_limit.unwrap_or(self.evm_env.block_env.gas_limit) } /// Returns the block number for the block. @@ -110,10 +108,7 @@ impl OpPayloadBuilderCtx { /// Returns the current blob gas price. 
pub(super) fn get_blob_gasprice(&self) -> Option { - self.evm_env - .block_env - .blob_gasprice() - .map(|gasprice| gasprice as u64) + self.evm_env.block_env.blob_gasprice().map(|gasprice| gasprice as u64) } /// Returns the blob fields for the header. @@ -123,11 +118,7 @@ impl OpPayloadBuilderCtx { // OP doesn't support blobs/EIP-4844. // https://specs.optimism.io/protocol/exec-engine.html#ecotone-disable-blob-transactions // Need [Some] or [None] based on hardfork to match block hash. - if self.is_ecotone_active() { - (Some(0), Some(0)) - } else { - (None, None) - } + if self.is_ecotone_active() { (Some(0), Some(0)) } else { (None, None) } } /// Returns the extra data for the block. @@ -159,32 +150,27 @@ impl OpPayloadBuilderCtx { /// Returns true if regolith is active for the payload. pub(super) fn is_regolith_active(&self) -> bool { - self.chain_spec - .is_regolith_active_at_timestamp(self.attributes().timestamp()) + self.chain_spec.is_regolith_active_at_timestamp(self.attributes().timestamp()) } /// Returns true if ecotone is active for the payload. pub(super) fn is_ecotone_active(&self) -> bool { - self.chain_spec - .is_ecotone_active_at_timestamp(self.attributes().timestamp()) + self.chain_spec.is_ecotone_active_at_timestamp(self.attributes().timestamp()) } /// Returns true if canyon is active for the payload. pub(super) fn is_canyon_active(&self) -> bool { - self.chain_spec - .is_canyon_active_at_timestamp(self.attributes().timestamp()) + self.chain_spec.is_canyon_active_at_timestamp(self.attributes().timestamp()) } /// Returns true if holocene is active for the payload. pub(super) fn is_holocene_active(&self) -> bool { - self.chain_spec - .is_holocene_active_at_timestamp(self.attributes().timestamp()) + self.chain_spec.is_holocene_active_at_timestamp(self.attributes().timestamp()) } /// Returns true if isthmus is active for the payload. 
pub(super) fn is_isthmus_active(&self) -> bool { - self.chain_spec - .is_isthmus_active_at_timestamp(self.attributes().timestamp()) + self.chain_spec.is_isthmus_active_at_timestamp(self.attributes().timestamp()) } /// Returns the chain id @@ -247,12 +233,9 @@ impl OpPayloadBuilderCtx { // purely for the purposes of utilizing the `evm_config.tx_env`` function. // Deposit transactions do not have signatures, so if the tx is a deposit, this // will just pull in its `from` address. - let sequencer_tx = sequencer_tx - .value() - .try_clone_into_recovered() - .map_err(|_| { - PayloadBuilderError::other(OpPayloadBuilderError::TransactionEcRecoverFailed) - })?; + let sequencer_tx = sequencer_tx.value().try_clone_into_recovered().map_err(|_| { + PayloadBuilderError::other(OpPayloadBuilderError::TransactionEcRecoverFailed) + })?; // Cache the depositor account prior to the state transition for the deposit nonce. // @@ -339,12 +322,8 @@ impl OpPayloadBuilderCtx { // Remove once we merge Reth 1.4.4 // Fixed in https://github.com/paradigmxyz/reth/pull/16514 - self.metrics - .da_block_size_limit - .set(block_da_limit.map_or(-1.0, |v| v as f64)); - self.metrics - .da_tx_size_limit - .set(tx_da_limit.map_or(-1.0, |v| v as f64)); + self.metrics.da_block_size_limit.set(block_da_limit.map_or(-1.0, |v| v as f64)); + self.metrics.da_tx_size_limit.set(tx_da_limit.map_or(-1.0, |v| v as f64)); let block_attr = BlockConditionalAttributes { number: self.block_number(), @@ -361,9 +340,11 @@ impl OpPayloadBuilderCtx { let tx_hash = tx.tx_hash(); // exclude reverting transaction if: - // - the transaction comes from a bundle (is_some) and the hash **is not** in reverted hashes - // Note that we need to use the Option to signal whether the transaction comes from a bundle, - // otherwise, we would exclude all transactions that are not in the reverted hashes. 
+ // - the transaction comes from a bundle (is_some) and the hash **is not** in reverted + // hashes + // Note that we need to use the Option to signal whether the transaction comes from a + // bundle, otherwise, we would exclude all transactions that are not in the + // reverted hashes. let is_bundle_tx = reverted_hashes.is_some(); let exclude_reverting_txs = is_bundle_tx && !reverted_hashes.unwrap().contains(&tx_hash); @@ -382,19 +363,20 @@ impl OpPayloadBuilderCtx { num_txs_considered += 1; // TODO: ideally we should get this from the txpool stream - if let Some(conditional) = conditional - && !conditional.matches_block_attributes(&block_attr) + if let Some(conditional) = conditional && + !conditional.matches_block_attributes(&block_attr) { best_txs.mark_invalid(tx.signer(), tx.nonce()); continue; } - // TODO: remove this condition and feature once we are comfortable enabling interop for everything + // TODO: remove this condition and feature once we are comfortable enabling interop for + // everything if cfg!(feature = "interop") { - // We skip invalid cross chain txs, they would be removed on the next block update in - // the maintenance job - if let Some(interop) = interop - && !is_valid_interop(interop, self.config.attributes.timestamp()) + // We skip invalid cross chain txs, they would be removed on the next block update + // in the maintenance job + if let Some(interop) = interop && + !is_valid_interop(interop, self.config.attributes.timestamp()) { log_txn(TxnExecutionResult::InteropFailed); best_txs.mark_invalid(tx.signer(), tx.nonce()); @@ -455,9 +437,7 @@ impl OpPayloadBuilderCtx { } }; - self.metrics - .tx_simulation_duration - .record(tx_simulation_start_time.elapsed()); + self.metrics.tx_simulation_duration.record(tx_simulation_start_time.elapsed()); self.metrics.tx_byte_size.record(tx.inner().size() as f64); num_txs_simulated += 1; @@ -465,11 +445,7 @@ impl OpPayloadBuilderCtx { // reverted or not, as this is a check against maliciously searchers // 
sending txs that are expensive to compute but always revert. let gas_used = result.gas_used(); - if self - .address_gas_limiter - .consume_gas(tx.signer(), gas_used) - .is_err() - { + if self.address_gas_limiter.consume_gas(tx.signer(), gas_used).is_err() { log_txn(TxnExecutionResult::MaxGasUsageExceeded); best_txs.mark_invalid(tx.signer(), tx.nonce()); continue; } @@ -495,12 +471,12 @@ // add gas used by the transaction to cumulative gas used, before creating the // receipt - if let Some(max_gas_per_txn) = self.max_gas_per_txn { - if gas_used > max_gas_per_txn { - log_txn(TxnExecutionResult::MaxGasUsageExceeded); - best_txs.mark_invalid(tx.signer(), tx.nonce()); - continue; - } + if let Some(max_gas_per_txn) = self.max_gas_per_txn && + gas_used > max_gas_per_txn + { + log_txn(TxnExecutionResult::MaxGasUsageExceeded); + best_txs.mark_invalid(tx.signer(), tx.nonce()); + continue; } info.cumulative_gas_used += gas_used; diff --git a/crates/op-rbuilder/src/builders/flashblocks/best_txs.rs b/crates/op-rbuilder/src/builders/flashblocks/best_txs.rs index 2f36c96dc..18e2fefd0 100644 --- a/crates/op-rbuilder/src/builders/flashblocks/best_txs.rs +++ b/crates/op-rbuilder/src/builders/flashblocks/best_txs.rs @@ -13,8 +13,8 @@ where { inner: reth_payload_util::BestPayloadTransactions, current_flashblock_number: u64, - // Transactions that were already commited to the state. Using them again would cause NonceTooLow - // so we skip them + // Transactions that were already committed to the state. Using them again would cause + // NonceTooLow so we skip them commited_transactions: HashSet, } @@ -24,11 +24,7 @@ where I: Iterator>>, { pub(super) fn new(inner: reth_payload_util::BestPayloadTransactions) -> Self { - Self { - inner, - current_flashblock_number: 0, - commited_transactions: Default::default(), - } + Self { inner, current_flashblock_number: 0, commited_transactions: Default::default() } } /// Replaces current iterator with new one. 
We use it on new flashblock building, to refresh @@ -67,27 +63,27 @@ where let flashblock_number_max = tx.flashblock_number_max(); // Check min flashblock requirement - if let Some(min) = flashblock_number_min { - if self.current_flashblock_number < min { - continue; - } + if let Some(min) = flashblock_number_min && + self.current_flashblock_number < min + { + continue; } // Check max flashblock requirement - if let Some(max) = flashblock_number_max { - if self.current_flashblock_number > max { - debug!( - target: "payload_builder", - tx_hash = ?tx.hash(), - sender = ?tx.sender(), - nonce = tx.nonce(), - current_flashblock = self.current_flashblock_number, - max_flashblock = max, - "Bundle flashblock max exceeded" - ); - self.inner.mark_invalid(tx.sender(), tx.nonce()); - continue; - } + if let Some(max) = flashblock_number_max && + self.current_flashblock_number > max + { + debug!( + target: "payload_builder", + tx_hash = ?tx.hash(), + sender = ?tx.sender(), + nonce = tx.nonce(), + current_flashblock = self.current_flashblock_number, + max_flashblock = max, + "Bundle flashblock max exceeded" + ); + self.inner.mark_invalid(tx.sender(), tx.nonce()); + continue; } return Some(tx); diff --git a/crates/op-rbuilder/src/builders/flashblocks/builder_tx.rs b/crates/op-rbuilder/src/builders/flashblocks/builder_tx.rs index be29e6aaa..33628fc6a 100644 --- a/crates/op-rbuilder/src/builders/flashblocks/builder_tx.rs +++ b/crates/op-rbuilder/src/builders/flashblocks/builder_tx.rs @@ -31,10 +31,7 @@ impl FlashblocksBuilderTx { signer: Option, flashtestations_builder_tx: Option, ) -> Self { - Self { - signer, - flashtestations_builder_tx, - } + Self { signer, flashtestations_builder_tx } } pub(super) fn simulate_builder_tx( @@ -50,11 +47,7 @@ impl FlashblocksBuilderTx { let da_size = op_alloy_flz::tx_estimated_size_fjord_bytes( signed_tx.encoded_2718().as_slice(), ); - Ok(Some(BuilderTransactionCtx { - gas_used, - da_size, - signed_tx, - })) + Ok(Some(BuilderTransactionCtx { 
gas_used, da_size, signed_tx })) } None => Ok(None), } @@ -63,11 +56,7 @@ impl FlashblocksBuilderTx { fn estimate_builder_tx_gas(&self, input: &[u8]) -> u64 { // Count zero and non-zero bytes let (zero_bytes, nonzero_bytes) = input.iter().fold((0, 0), |(zeros, nonzeros), &byte| { - if byte == 0 { - (zeros + 1, nonzeros) - } else { - (zeros, nonzeros + 1) - } + if byte == 0 { (zeros + 1, nonzeros) } else { (zeros, nonzeros + 1) } }); // Calculate gas cost (4 gas per zero byte, 16 gas per non-zero byte) @@ -107,9 +96,7 @@ impl FlashblocksBuilderTx { ..Default::default() }); // Sign the transaction - let builder_tx = signer - .sign_tx(tx) - .map_err(BuilderTransactionError::SigningError)?; + let builder_tx = signer.sign_tx(tx).map_err(BuilderTransactionError::SigningError)?; Ok(builder_tx) } diff --git a/crates/op-rbuilder/src/builders/flashblocks/config.rs b/crates/op-rbuilder/src/builders/flashblocks/config.rs index c852a3547..0919aed6e 100644 --- a/crates/op-rbuilder/src/builders/flashblocks/config.rs +++ b/crates/op-rbuilder/src/builders/flashblocks/config.rs @@ -11,19 +11,21 @@ pub struct FlashblocksConfig { /// new flashblocks updates. pub ws_addr: SocketAddr, - /// How often a flashblock is produced. This is independent of the block time of the chain. - /// Each block will contain one or more flashblocks. On average, the number of flashblocks - /// per block is equal to the block time divided by the flashblock interval. + /// How often a flashblock is produced. This is independent of the block + /// time of the chain. Each block will contain one or more flashblocks. + /// On average, the number of flashblocks per block is equal to the + /// block time divided by the flashblock interval + 1 with the initial + /// flashblock in the block. pub interval: Duration, - /// How much time would be deducted from block build time to account for latencies in - /// milliseconds. 
+ /// How much time would be deducted from block build time to account for + /// latencies in milliseconds. /// - /// If dynamic_adjustment is false this value would be deducted from first flashblock and - /// it shouldn't be more than interval + /// If dynamic_adjustment is false this value would be deducted from first + /// flashblock and it shouldn't be more than interval /// - /// If dynamic_adjustment is true this value would be deducted from first flashblock and - /// it shouldn't be more than interval + /// If dynamic_adjustment is true this value would be deducted from first + /// flashblock and it shouldn't be more than interval pub leeway_time: Duration, /// Disables dynamic flashblocks number adjustment based on FCU arrival time @@ -62,13 +64,7 @@ impl TryFrom for FlashblocksConfig { let calculate_state_root = args.flashblocks.flashblocks_calculate_state_root; - Ok(Self { - ws_addr, - interval, - leeway_time, - fixed, - calculate_state_root, - }) + Ok(Self { ws_addr, interval, leeway_time, fixed, calculate_state_root }) } } diff --git a/crates/op-rbuilder/src/builders/flashblocks/mod.rs b/crates/op-rbuilder/src/builders/flashblocks/mod.rs index 10503fa5d..9f473fc17 100644 --- a/crates/op-rbuilder/src/builders/flashblocks/mod.rs +++ b/crates/op-rbuilder/src/builders/flashblocks/mod.rs @@ -10,8 +10,9 @@ mod payload; mod service; mod wspub; -/// Block building strategy that progressively builds chunks of a block and makes them available -/// through a websocket update, then merges them into a full block every chain block time. +/// Block building strategy that progressively builds chunks of a block and +/// makes them available through a websocket update, then merges them into a +/// full block every chain block time. 
pub struct FlashblocksBuilder; impl super::PayloadBuilder for FlashblocksBuilder { diff --git a/crates/op-rbuilder/src/builders/flashblocks/payload.rs b/crates/op-rbuilder/src/builders/flashblocks/payload.rs index 05228f829..c52edfe2e 100644 --- a/crates/op-rbuilder/src/builders/flashblocks/payload.rs +++ b/crates/op-rbuilder/src/builders/flashblocks/payload.rs @@ -227,25 +227,18 @@ where best_payload: BlockCell, ) -> Result<(), PayloadBuilderError> { let block_build_start_time = Instant::now(); - let BuildArguments { - mut cached_reads, - config, - cancel: block_cancel, - } = args; + let BuildArguments { mut cached_reads, config, cancel: block_cancel } = args; // We log only every 100th block to reduce usage - let span = if cfg!(feature = "telemetry") - && config.parent_header.number % self.config.sampling_ratio == 0 + let span = if cfg!(feature = "telemetry") && + config.parent_header.number % self.config.sampling_ratio == 0 { span!(Level::INFO, "build_payload") } else { tracing::Span::none() }; let _entered = span.enter(); - span.record( - "payload_id", - config.attributes.payload_attributes.id.to_string(), - ); + span.record("payload_id", config.attributes.payload_attributes.id.to_string()); let chain_spec = self.client.chain_spec(); let timestamp = config.attributes.timestamp(); @@ -254,14 +247,8 @@ where timestamp, suggested_fee_recipient: config.attributes.suggested_fee_recipient(), prev_randao: config.attributes.prev_randao(), - gas_limit: config - .attributes - .gas_limit - .unwrap_or(config.parent_header.gas_limit), - parent_beacon_block_root: config - .attributes - .payload_attributes - .parent_beacon_block_root, + gas_limit: config.attributes.gas_limit.unwrap_or(config.parent_header.gas_limit), + parent_beacon_block_root: config.attributes.payload_attributes.parent_beacon_block_root, extra_data: if chain_spec.is_holocene_active_at_timestamp(timestamp) { config .attributes @@ -308,10 +295,8 @@ where // 1. 
execute the pre steps and seal an early block with that let sequencer_tx_start_time = Instant::now(); - let mut state = State::builder() - .with_database(cached_reads.as_db_mut(db)) - .with_bundle_update() - .build(); + let mut state = + State::builder().with_database(cached_reads.as_db_mut(db)).with_bundle_update().build(); let mut info = execute_pre_steps(&mut state, &ctx)?; let sequencer_tx_time = sequencer_tx_start_time.elapsed(); @@ -322,8 +307,7 @@ where let builder_txs = if ctx.attributes().no_tx_pool { vec![] } else { - self.builder_tx - .add_builder_txs(&state_provider, &mut info, &ctx, &mut state)? + self.builder_tx.add_builder_txs(&state_provider, &mut info, &ctx, &mut state)? }; // We subtract gas limit and da limit for builder transaction from the whole limit @@ -334,20 +318,17 @@ where &mut state, &ctx, &mut info, - calculate_state_root || ctx.attributes().no_tx_pool, // need to calculate state root for CL sync + calculate_state_root || ctx.attributes().no_tx_pool, /* need to calculate state root + * for CL sync */ )?; best_payload.set(payload.clone()); self.send_payload_to_engine(payload); // not emitting flashblock if no_tx_pool in FCU, it's just syncing if !ctx.attributes().no_tx_pool { - let flashblock_byte_size = self - .ws_pub - .publish(&fb_payload) - .map_err(PayloadBuilderError::other)?; - ctx.metrics - .flashblock_byte_size_histogram - .record(flashblock_byte_size as f64); + let flashblock_byte_size = + self.ws_pub.publish(&fb_payload).map_err(PayloadBuilderError::other)?; + ctx.metrics.flashblock_byte_size_histogram.record(flashblock_byte_size as f64); } info!( @@ -363,18 +344,10 @@ where ); let total_block_building_time = block_build_start_time.elapsed(); - ctx.metrics - .total_block_built_duration - .record(total_block_building_time); - ctx.metrics - .total_block_built_gauge - .set(total_block_building_time); - ctx.metrics - .payload_num_tx - .record(info.executed_transactions.len() as f64); - ctx.metrics - .payload_num_tx_gauge - 
.set(info.executed_transactions.len() as f64); + ctx.metrics.total_block_built_duration.record(total_block_building_time); + ctx.metrics.total_block_built_gauge.set(total_block_building_time); + ctx.metrics.payload_num_tx.record(info.executed_transactions.len() as f64); + ctx.metrics.payload_num_tx_gauge.set(info.executed_transactions.len() as f64); // return early since we don't need to build a block with transactions from the pool return Ok(()); @@ -391,13 +364,10 @@ where flashblocks_interval = self.config.specific.interval.as_millis(), ); ctx.metrics.reduced_flashblocks_number.record( - self.config - .flashblocks_per_block() - .saturating_sub(ctx.target_flashblock_count()) as f64, + self.config.flashblocks_per_block().saturating_sub(ctx.target_flashblock_count()) + as f64, ); - ctx.metrics - .first_flashblock_time_offset - .record(first_flashblock_offset.as_millis() as f64); + ctx.metrics.first_flashblock_time_offset.record(first_flashblock_offset.as_millis() as f64); let gas_per_batch = ctx.block_gas_limit() / ctx.target_flashblock_count(); let target_gas_for_batch = gas_per_batch; let da_per_batch = ctx @@ -426,8 +396,7 @@ where // Create best_transaction iterator let mut best_txs = BestFlashblocksTxs::new(BestPayloadTransactions::new( - self.pool - .best_transactions_with_attributes(ctx.best_transaction_attributes()), + self.pool.best_transactions_with_attributes(ctx.best_transaction_attributes()), )); let interval = self.config.specific.interval; let (tx, mut rx) = mpsc::channel((self.config.flashblocks_per_block() + 1) as usize); @@ -571,14 +540,14 @@ where let flashblock_build_start_time = Instant::now(); let builder_txs = - self.builder_tx - .simulate_builder_txs(&state_provider, info, ctx, state)?; + self.builder_tx.simulate_builder_txs(&state_provider, info, ctx, state)?; let builder_tx_gas = builder_txs.iter().fold(0, |acc, tx| acc + tx.gas_used); let builder_tx_da_size: u64 = builder_txs.iter().fold(0, |acc, tx| acc + tx.da_size); 
target_gas_for_batch = target_gas_for_batch.saturating_sub(builder_tx_gas); - // saturating sub just in case, we will log an error if da_limit too small for builder_tx_da_size + // saturating sub just in case, we will log an error if da_limit too small for + // builder_tx_da_size if let Some(da_limit) = total_da_per_batch.as_mut() { *da_limit = da_limit.saturating_sub(builder_tx_da_size); } @@ -586,18 +555,13 @@ where let best_txs_start_time = Instant::now(); best_txs.refresh_iterator( BestPayloadTransactions::new( - self.pool - .best_transactions_with_attributes(ctx.best_transaction_attributes()), + self.pool.best_transactions_with_attributes(ctx.best_transaction_attributes()), ), ctx.flashblock_index(), ); let transaction_pool_fetch_time = best_txs_start_time.elapsed(); - ctx.metrics - .transaction_pool_fetch_duration - .record(transaction_pool_fetch_time); - ctx.metrics - .transaction_pool_fetch_gauge - .set(transaction_pool_fetch_time); + ctx.metrics.transaction_pool_fetch_duration.record(transaction_pool_fetch_time); + ctx.metrics.transaction_pool_fetch_gauge.set(transaction_pool_fetch_time); let tx_execution_start_time = Instant::now(); ctx.execute_best_transactions( @@ -629,15 +593,10 @@ where } let payload_tx_simulation_time = tx_execution_start_time.elapsed(); - ctx.metrics - .payload_tx_simulation_duration - .record(payload_tx_simulation_time); - ctx.metrics - .payload_tx_simulation_gauge - .set(payload_tx_simulation_time); + ctx.metrics.payload_tx_simulation_duration.record(payload_tx_simulation_time); + ctx.metrics.payload_tx_simulation_gauge.set(payload_tx_simulation_time); - self.builder_tx - .add_builder_txs(&state_provider, info, ctx, state)?; + self.builder_tx.add_builder_txs(&state_provider, info, ctx, state)?; let total_block_built_duration = Instant::now(); let build_result = build_block( @@ -647,12 +606,8 @@ where ctx.extra_ctx.calculate_state_root || ctx.attributes().no_tx_pool, ); let total_block_built_duration = 
total_block_built_duration.elapsed(); - ctx.metrics - .total_block_built_duration - .record(total_block_built_duration); - ctx.metrics - .total_block_built_gauge - .set(total_block_built_duration); + ctx.metrics.total_block_built_duration.record(total_block_built_duration); + ctx.metrics.total_block_built_gauge.set(total_block_built_duration); // Handle build errors with match pattern match build_result { @@ -667,8 +622,9 @@ where fb_payload.index = ctx.flashblock_index(); fb_payload.base = None; - // If main token got canceled in here that means we received get_payload and we should drop everything and now update best_payload - // To ensure that we will return same blocks as rollup-boost (to leverage caches) + // If main token got canceled in here that means we received get_payload and we + // should drop everything and now update best_payload To ensure that + // we will return same blocks as rollup-boost (to leverage caches) if block_cancel.is_cancelled() { self.record_flashblocks_metrics( ctx, @@ -679,18 +635,12 @@ where ); return Ok(()); } - let flashblock_byte_size = self - .ws_pub - .publish(&fb_payload) - .map_err(PayloadBuilderError::other)?; + let flashblock_byte_size = + self.ws_pub.publish(&fb_payload).map_err(PayloadBuilderError::other)?; // Record flashblock build duration - ctx.metrics - .flashblock_build_duration - .record(flashblock_build_start_time.elapsed()); - ctx.metrics - .flashblock_byte_size_histogram - .record(flashblock_byte_size as f64); + ctx.metrics.flashblock_build_duration.record(flashblock_build_start_time.elapsed()); + ctx.metrics.flashblock_byte_size_histogram.record(flashblock_byte_size as f64); ctx.metrics .flashblock_num_tx_histogram .record(info.executed_transactions.len() as f64); @@ -734,18 +684,12 @@ where message: &str, ) { ctx.metrics.block_built_success.increment(1); - ctx.metrics - .flashblock_count - .record(ctx.flashblock_index() as f64); + ctx.metrics.flashblock_count.record(ctx.flashblock_index() as f64); ctx.metrics 
.missing_flashblocks_count .record(flashblocks_per_block.saturating_sub(ctx.flashblock_index()) as f64); - ctx.metrics - .payload_num_tx - .record(info.executed_transactions.len() as f64); - ctx.metrics - .payload_num_tx_gauge - .set(info.executed_transactions.len() as f64); + ctx.metrics.payload_num_tx.record(info.executed_transactions.len() as f64); + ctx.metrics.payload_num_tx_gauge.set(info.executed_transactions.len() as f64); debug!( target: "payload_builder", @@ -783,18 +727,20 @@ where if self.config.specific.fixed { return ( self.config.flashblocks_per_block(), - // We adjust first FB to ensure that we have at least some time to make all FB in time + // We adjust first FB to ensure that we have at least some time to make all FB in + // time self.config.specific.interval - self.config.specific.leeway_time, ); } // We use this system time to determine remining time to build a block // Things to consider: // FCU(a) - FCU with attributes - // FCU(a) could arrive with `block_time - fb_time < delay`. In this case we could only produce 1 flashblock - // FCU(a) could arrive with `delay < fb_time` - in this case we will shrink first flashblock - // FCU(a) could arrive with `fb_time < delay < block_time - fb_time` - in this case we will issue less flashblocks - let target_time = std::time::SystemTime::UNIX_EPOCH + Duration::from_secs(timestamp) - - self.config.specific.leeway_time; + // FCU(a) could arrive with `block_time - fb_time < delay`. 
In this case we could only + // produce 1 flashblock FCU(a) could arrive with `delay < fb_time` - in this case we + // will shrink first flashblock FCU(a) could arrive with `fb_time < delay < + // block_time - fb_time` - in this case we will issue less flashblocks + let target_time = std::time::SystemTime::UNIX_EPOCH + Duration::from_secs(timestamp) - + self.config.specific.leeway_time; let now = std::time::SystemTime::now(); let Ok(time_drift) = target_time.duration_since(now) else { error!( @@ -803,16 +749,10 @@ where ?target_time, ?now, ); - return ( - self.config.flashblocks_per_block(), - self.config.specific.interval, - ); + return (self.config.flashblocks_per_block(), self.config.specific.interval); }; self.metrics.flashblocks_time_drift.record( - self.config - .block_time - .as_millis() - .saturating_sub(time_drift.as_millis()) as f64, + self.config.block_time.as_millis().saturating_sub(time_drift.as_millis()) as f64, ); debug!( target: "payload_builder", @@ -821,7 +761,8 @@ where time_drift = self.config.block_time.as_millis().saturating_sub(time_drift.as_millis()), ?timestamp ); - // This is extra check to ensure that we would account at least for block time in case we have any timer discrepancies. + // This is extra check to ensure that we would account at least for block time in case we + // have any timer discrepancies. let time_drift = time_drift.min(self.config.block_time); let interval = self.config.specific.interval.as_millis() as u64; let time_drift = time_drift.as_millis() as u64; @@ -831,10 +772,7 @@ where (time_drift.div(interval), Duration::from_millis(interval)) } else { // Non-perfect division, so we account for it. 
- ( - time_drift.div(interval) + 1, - Duration::from_millis(first_flashblock_offset), - ) + (time_drift.div(interval) + 1, Duration::from_millis(first_flashblock_offset)) } } } @@ -901,12 +839,8 @@ where let state_merge_start_time = Instant::now(); state.merge_transitions(BundleRetention::Reverts); let state_transition_merge_time = state_merge_start_time.elapsed(); - ctx.metrics - .state_transition_merge_duration - .record(state_transition_merge_time); - ctx.metrics - .state_transition_merge_gauge - .set(state_transition_merge_time); + ctx.metrics.state_transition_merge_duration.record(state_transition_merge_time); + ctx.metrics.state_transition_merge_gauge.set(state_transition_merge_time); let block_number = ctx.block_number(); assert_eq!(block_number, ctx.parent().number + 1); @@ -927,9 +861,7 @@ where ) }) .expect("Number is in range"); - let logs_bloom = execution_outcome - .block_logs_bloom(block_number) - .expect("Number is in range"); + let logs_bloom = execution_outcome.block_logs_bloom(block_number).expect("Number is in range"); // TODO: maybe recreate state with bundle in here // // calculate the state root @@ -942,49 +874,38 @@ where let state_provider = state.database.as_ref(); hashed_state = state_provider.hashed_post_state(execution_outcome.state()); (state_root, trie_output) = { - state - .database - .as_ref() - .state_root_with_updates(hashed_state.clone()) - .inspect_err(|err| { + state.database.as_ref().state_root_with_updates(hashed_state.clone()).inspect_err( + |err| { warn!(target: "payload_builder", parent_header=%ctx.parent().hash(), %err, "failed to calculate state root for payload" ); - })? + }, + )? 
}; let state_root_calculation_time = state_root_start_time.elapsed(); - ctx.metrics - .state_root_calculation_duration - .record(state_root_calculation_time); - ctx.metrics - .state_root_calculation_gauge - .set(state_root_calculation_time); + ctx.metrics.state_root_calculation_duration.record(state_root_calculation_time); + ctx.metrics.state_root_calculation_gauge.set(state_root_calculation_time); } let mut requests_hash = None; - let withdrawals_root = if ctx - .chain_spec - .is_isthmus_active_at_timestamp(ctx.attributes().timestamp()) - { - // always empty requests hash post isthmus - requests_hash = Some(EMPTY_REQUESTS_HASH); - - // withdrawals root field in block header is used for storage root of L2 predeploy - // `l2tol1-message-passer` - Some( - isthmus::withdrawals_root(execution_outcome.state(), state.database.as_ref()) - .map_err(PayloadBuilderError::other)?, - ) - } else if ctx - .chain_spec - .is_canyon_active_at_timestamp(ctx.attributes().timestamp()) - { - Some(EMPTY_WITHDRAWALS) - } else { - None - }; + let withdrawals_root = + if ctx.chain_spec.is_isthmus_active_at_timestamp(ctx.attributes().timestamp()) { + // always empty requests hash post isthmus + requests_hash = Some(EMPTY_REQUESTS_HASH); + + // withdrawals root field in block header is used for storage root of L2 predeploy + // `l2tol1-message-passer` + Some( + isthmus::withdrawals_root(execution_outcome.state(), state.database.as_ref()) + .map_err(PayloadBuilderError::other)?, + ) + } else if ctx.chain_spec.is_canyon_active_at_timestamp(ctx.attributes().timestamp()) { + Some(EMPTY_WITHDRAWALS) + } else { + None + }; // create the block header let transactions_root = proofs::calculate_transaction_root(&info.executed_transactions); @@ -1050,11 +971,8 @@ where // pick the new transactions from the info field and update the last flashblock index let new_transactions = info.executed_transactions[info.extra.last_flashblock_index..].to_vec(); - let new_transactions_encoded = new_transactions - 
.clone() - .into_iter() - .map(|tx| tx.encoded_2718().into()) - .collect::>(); + let new_transactions_encoded = + new_transactions.clone().into_iter().map(|tx| tx.encoded_2718().into()).collect::>(); let new_receipts = info.receipts[info.extra.last_flashblock_index..].to_vec(); info.extra.last_flashblock_index = info.executed_transactions.len(); @@ -1113,12 +1031,7 @@ where state.transition_state = untouched_transition_state; Ok(( - OpBuiltPayload::new( - ctx.payload_id(), - sealed_block, - info.total_fees, - Some(executed), - ), + OpBuiltPayload::new(ctx.payload_id(), sealed_block, info.total_fees, Some(executed)), fb_payload, )) } diff --git a/crates/op-rbuilder/src/builders/flashblocks/wspub.rs b/crates/op-rbuilder/src/builders/flashblocks/wspub.rs index 944edb911..c5499f403 100644 --- a/crates/op-rbuilder/src/builders/flashblocks/wspub.rs +++ b/crates/op-rbuilder/src/builders/flashblocks/wspub.rs @@ -53,12 +53,7 @@ impl WebSocketPublisher { Arc::clone(&subs), )); - Ok(Self { - sent, - subs, - term, - pipe, - }) + Ok(Self { sent, subs, term, pipe }) } pub(super) fn publish(&self, payload: &FlashblocksPayloadV1) -> io::Result { @@ -100,16 +95,12 @@ async fn listener_loop( sent: Arc, subs: Arc, ) { - listener - .set_nonblocking(true) - .expect("Failed to set TcpListener socket to non-blocking"); + listener.set_nonblocking(true).expect("Failed to set TcpListener socket to non-blocking"); let listener = tokio::net::TcpListener::from_std(listener) .expect("Failed to convert TcpListener to tokio TcpListener"); - let listen_addr = listener - .local_addr() - .expect("Failed to get local address of listener"); + let listen_addr = listener.local_addr().expect("Failed to get local address of listener"); tracing::info!("Flashblocks WebSocketPublisher listening on {listen_addr}"); let mut term = term; diff --git a/crates/op-rbuilder/src/builders/generator.rs b/crates/op-rbuilder/src/builders/generator.rs index 3408751c8..a52702578 100644 --- 
a/crates/op-rbuilder/src/builders/generator.rs +++ b/crates/op-rbuilder/src/builders/generator.rs @@ -111,10 +111,7 @@ impl BlockPayloadJobGenerator { /// Returns the pre-cached reads for the given parent header if it matches the cached state's /// block. fn maybe_pre_cached(&self, parent: B256) -> Option { - self.pre_cached - .as_ref() - .filter(|pc| pc.block == parent) - .map(|pc| pc.cached.clone()) + self.pre_cached.as_ref().filter(|pc| pc.block == parent).map(|pc| pc.cached.clone()) } } @@ -217,19 +214,13 @@ where if let Some(info) = acc.info.clone() { // we want pre cache existing accounts and their storage // this only includes changed accounts and storage but is better than nothing - let storage = acc - .storage - .iter() - .map(|(key, slot)| (*key, slot.present_value)) - .collect(); + let storage = + acc.storage.iter().map(|(key, slot)| (*key, slot.present_value)).collect(); cached.insert_account(addr, info, storage); } } - self.pre_cached = Some(PrecachedState { - block: committed.tip().hash(), - cached, - }); + self.pre_cached = Some(PrecachedState { block: committed.tip().hash(), cached }); } } @@ -324,11 +315,7 @@ where self.build_complete = Some(rx); let cached_reads = self.cached_reads.take().unwrap_or_default(); self.executor.spawn_blocking(Box::pin(async move { - let args = BuildArguments { - cached_reads, - config: payload_config, - cancel, - }; + let args = BuildArguments { cached_reads, config: payload_config, cancel }; let result = builder.try_build(args, cell).await; let _ = tx.send(result); @@ -397,10 +384,7 @@ pub(super) struct BlockCell { impl BlockCell { pub(super) fn new() -> Self { - Self { - inner: Arc::new(Mutex::new(None)), - notify: Arc::new(Notify::new()), - } + Self { inner: Arc::new(Mutex::new(None)), notify: Arc::new(Notify::new()) } } pub(super) fn set(&self, value: T) { @@ -447,10 +431,7 @@ impl Default for BlockCell { } fn job_deadline(unix_timestamp_secs: u64) -> std::time::Duration { - let unix_now = SystemTime::now() - 
.duration_since(UNIX_EPOCH) - .unwrap() - .as_secs(); + let unix_now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(); // Safe subtraction that handles the case where timestamp is in the past let duration_until = unix_timestamp_secs.saturating_sub(unix_now); @@ -557,10 +538,7 @@ mod tests { impl MockBuilder { fn new() -> Self { - Self { - events: Arc::new(Mutex::new(vec![])), - _marker: std::marker::PhantomData, - } + Self { events: Arc::new(Mutex::new(vec![])), _marker: std::marker::PhantomData } } fn new_event(&self, event: BlockEvent) { @@ -668,10 +646,7 @@ mod tests { let blocks = random_block_range( &mut rng, start..=start + count - 1, - BlockRangeParams { - tx_count: 0..2, - ..Default::default() - }, + BlockRangeParams { tx_count: 0..2, ..Default::default() }, ); client.extend_blocks(blocks.iter().cloned().map(|b| (b.hash(), b.unseal()))); diff --git a/crates/op-rbuilder/src/builders/mod.rs b/crates/op-rbuilder/src/builders/mod.rs index 8dcde8eb5..a2a0aef50 100644 --- a/crates/op-rbuilder/src/builders/mod.rs +++ b/crates/op-rbuilder/src/builders/mod.rs @@ -29,23 +29,27 @@ pub use standard::StandardBuilder; /// Defines the payload building mode for the OP builder. #[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] pub enum BuilderMode { - /// Uses the plain OP payload builder that produces blocks every chain blocktime. + /// Uses the plain OP payload builder that produces blocks every chain + /// blocktime. #[default] Standard, - /// Uses the flashblocks payload builder that progressively builds chunks of a - /// block every short interval and makes it available through a websocket update - /// then merges them into a full block every chain block time. + /// Uses the flashblocks payload builder that progressively builds chunks of + /// a block every short interval and makes it available through a + /// websocket update then merges them into a full block every chain + /// block time. 
Flashblocks, } /// Defines the interface for any block builder implementation API entry point. /// -/// Instances of this trait are used during Reth node construction as an argument -/// to the `NodeBuilder::with_components` method to construct the payload builder -/// service that gets called whenver the current node is asked to build a block. +/// Instances of this trait are used during Reth node construction as an +/// argument to the `NodeBuilder::with_components` method to construct the +/// payload builder service that gets called whenever the current node is asked +/// to build a block. pub trait PayloadBuilder: Send + Sync + 'static { - /// The type that has an implementation specific variant of the Config struct. - /// This is used to configure the payload builder service during startup. + /// The type that has an implementation specific variant of the Config + /// struct. This is used to configure the payload builder service during + /// startup. type Config: TryFrom + Clone + Debug + Send + Sync + 'static; /// The type that is used to instantiate the payload builder service @@ -56,9 +60,9 @@ pub trait PayloadBuilder: Send + Sync + 'static { Node: NodeBounds, Pool: PoolBounds; - /// Called during node startup by reth. Returns a [`PayloadBuilderService`] instance - /// that is preloaded with a [`PayloadJobGenerator`] instance specific to the builder - /// type. + /// Called during node startup by reth. Returns a [`PayloadBuilderService`] + /// instance that is preloaded with a [`PayloadJobGenerator`] instance + /// specific to the builder type. fn new_service( config: BuilderConfig, ) -> eyre::Result> @@ -70,25 +74,27 @@ pub trait PayloadBuilder: Send + Sync + 'static { /// Configuration values that are applicable to any type of block builder. #[derive(Clone)] pub struct BuilderConfig { - /// Secret key of the builder that is used to sign the end of block transaction. + /// Secret key of the builder that is used to sign the end of block + /// transaction. 
pub builder_signer: Option, - /// When set to true, transactions are simulated by the builder and excluded from the block - /// if they revert. They may still be included in the block if individual transactions - /// opt-out of revert protection. + /// When set to true, transactions are simulated by the builder and excluded + /// from the block if they revert. They may still be included in the + /// block if individual transactions opt-out of revert protection. pub revert_protection: bool, - /// When enabled, this will invoke the flashtestions workflow. This involves a - /// bootstrapping step that generates a new pubkey for the TEE service + /// When enabled, this will invoke the flashtestions workflow. This involves + /// a bootstrapping step that generates a new pubkey for the TEE service pub flashtestations_config: FlashtestationsArgs, /// The interval at which blocks are added to the chain. - /// This is also the frequency at which the builder will be receiving FCU requests from the - /// sequencer. + /// This is also the frequency at which the builder will be receiving FCU + /// requests from the sequencer. pub block_time: Duration, /// Data Availability configuration for the OP builder - /// Defines constraints for the maximum size of data availability transactions. + /// Defines constraints for the maximum size of data availability + /// transactions. pub da_config: OpDAConfig, // The deadline is critical for payload availability. If we reach the deadline, @@ -109,10 +115,12 @@ pub struct BuilderConfig { // (not just 0.5s) because of that. pub block_time_leeway: Duration, - /// Inverted sampling frequency in blocks. 1 - each block, 100 - every 100th block. + /// Inverted sampling frequency in blocks. 1 - each block, 100 - every 100th + /// block. pub sampling_ratio: u64, - /// Configuration values that are specific to the block builder implementation used. + /// Configuration values that are specific to the block builder + /// implementation used. 
pub specific: Specific, /// Maximum gas a transaction can use before being excluded. pub max_gas_per_txn: Option, @@ -121,7 +129,7 @@ pub struct BuilderConfig { pub gas_limiter_config: GasLimiterArgs, } -impl core::fmt::Debug for BuilderConfig { +impl Debug for BuilderConfig { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("Config") .field( diff --git a/crates/op-rbuilder/src/builders/standard/builder_tx.rs b/crates/op-rbuilder/src/builders/standard/builder_tx.rs index 23c39f3c8..7dfe429f7 100644 --- a/crates/op-rbuilder/src/builders/standard/builder_tx.rs +++ b/crates/op-rbuilder/src/builders/standard/builder_tx.rs @@ -31,10 +31,7 @@ impl StandardBuilderTx { signer: Option, flashtestations_builder_tx: Option, ) -> Self { - Self { - signer, - flashtestations_builder_tx, - } + Self { signer, flashtestations_builder_tx } } pub(super) fn simulate_builder_tx( @@ -50,11 +47,7 @@ impl StandardBuilderTx { let da_size = op_alloy_flz::tx_estimated_size_fjord_bytes( signed_tx.encoded_2718().as_slice(), ); - Ok(Some(BuilderTransactionCtx { - gas_used, - da_size, - signed_tx, - })) + Ok(Some(BuilderTransactionCtx { gas_used, da_size, signed_tx })) } None => Ok(None), } @@ -63,11 +56,7 @@ impl StandardBuilderTx { fn estimate_builder_tx_gas(&self, input: &[u8]) -> u64 { // Count zero and non-zero bytes let (zero_bytes, nonzero_bytes) = input.iter().fold((0, 0), |(zeros, nonzeros), &byte| { - if byte == 0 { - (zeros + 1, nonzeros) - } else { - (zeros, nonzeros + 1) - } + if byte == 0 { (zeros + 1, nonzeros) } else { (zeros, nonzeros + 1) } }); // Calculate gas cost (4 gas per zero byte, 16 gas per non-zero byte) @@ -107,9 +96,7 @@ impl StandardBuilderTx { ..Default::default() }); // Sign the transaction - let builder_tx = signer - .sign_tx(tx) - .map_err(BuilderTransactionError::SigningError)?; + let builder_tx = signer.sign_tx(tx).map_err(BuilderTransactionError::SigningError)?; Ok(builder_tx) } diff --git 
a/crates/op-rbuilder/src/builders/standard/payload.rs b/crates/op-rbuilder/src/builders/standard/payload.rs index 52acef4b7..444068b1b 100644 --- a/crates/op-rbuilder/src/builders/standard/payload.rs +++ b/crates/op-rbuilder/src/builders/standard/payload.rs @@ -101,10 +101,7 @@ impl OpPayloadTransactions for () { ) -> impl PayloadTransactions { // TODO: once this issue is fixed we could remove without_updates and rely on regular impl // https://github.com/paradigmxyz/reth/issues/17325 - BestPayloadTransactions::new( - pool.best_transactions_with_attributes(attr) - .without_updates(), - ) + BestPayloadTransactions::new(pool.best_transactions_with_attributes(attr).without_updates()) } } @@ -132,16 +129,11 @@ where best_payload: _, } = args; - let args = BuildArguments { - cached_reads, - config, - cancel: CancellationToken::new(), - }; + let args = BuildArguments { cached_reads, config, cancel: CancellationToken::new() }; self.build_payload(args, |attrs| { #[allow(clippy::unit_arg)] - self.best_transactions - .best_transactions(pool.clone(), attrs) + self.best_transactions.best_transactions(pool.clone(), attrs) }) } @@ -159,16 +151,11 @@ where reth_basic_payload_builder::HeaderForPayload, >, ) -> Result { - let args = BuildArguments { - config, - cached_reads: Default::default(), - cancel: Default::default(), - }; - self.build_payload(args, |_| { - NoopPayloadTransactions::::default() - })? - .into_payload() - .ok_or_else(|| PayloadBuilderError::MissingPayload) + let args = + BuildArguments { config, cached_reads: Default::default(), cancel: Default::default() }; + self.build_payload(args, |_| NoopPayloadTransactions::::default())? 
+ .into_payload() + .ok_or_else(|| PayloadBuilderError::MissingPayload) } } @@ -193,11 +180,7 @@ where ) -> Result, PayloadBuilderError> { let block_build_start_time = Instant::now(); - let BuildArguments { - mut cached_reads, - config, - cancel, - } = args; + let BuildArguments { mut cached_reads, config, cancel } = args; let chain_spec = self.client.chain_spec(); let timestamp = config.attributes.timestamp(); @@ -205,14 +188,8 @@ where timestamp, suggested_fee_recipient: config.attributes.suggested_fee_recipient(), prev_randao: config.attributes.prev_randao(), - gas_limit: config - .attributes - .gas_limit - .unwrap_or(config.parent_header.gas_limit), - parent_beacon_block_root: config - .attributes - .payload_attributes - .parent_beacon_block_root, + gas_limit: config.attributes.gas_limit.unwrap_or(config.parent_header.gas_limit), + parent_beacon_block_root: config.attributes.payload_attributes.parent_beacon_block_root, extra_data: if chain_spec.is_holocene_active_at_timestamp(timestamp) { config .attributes @@ -251,10 +228,7 @@ where let db = StateProviderDatabase::new(&state_provider); let metrics = ctx.metrics.clone(); if ctx.attributes().no_tx_pool { - let state = State::builder() - .with_database(db) - .with_bundle_update() - .build(); + let state = State::builder().with_database(db).with_bundle_update().build(); builder.build(state, &state_provider, ctx, self.builder_tx.clone()) } else { // sequencer mode we can reuse cachedreads from previous runs @@ -266,12 +240,8 @@ where } .map(|out| { let total_block_building_time = block_build_start_time.elapsed(); - metrics - .total_block_built_duration - .record(total_block_building_time); - metrics - .total_block_built_gauge - .set(total_block_building_time); + metrics.total_block_built_duration.record(total_block_building_time); + metrics.total_block_built_gauge.set(total_block_building_time); out.with_cached_reads(cached_reads) }) @@ -301,9 +271,7 @@ pub(super) struct OpBuilder<'a, Txs> { impl<'a, Txs> 
OpBuilder<'a, Txs> { fn new(best: impl FnOnce(BestTransactionsAttributes) -> Txs + Send + Sync + 'a) -> Self { - Self { - best: Box::new(best), - } + Self { best: Box::new(best) } } } @@ -372,12 +340,8 @@ impl OpBuilder<'_, Txs> { let best_txs_start_time = Instant::now(); let mut best_txs = best(ctx.best_transaction_attributes()); let transaction_pool_fetch_time = best_txs_start_time.elapsed(); - ctx.metrics - .transaction_pool_fetch_duration - .record(transaction_pool_fetch_time); - ctx.metrics - .transaction_pool_fetch_gauge - .set(transaction_pool_fetch_time); + ctx.metrics.transaction_pool_fetch_duration.record(transaction_pool_fetch_time); + ctx.metrics.transaction_pool_fetch_gauge.set(transaction_pool_fetch_time); if ctx .execute_best_transactions( @@ -403,19 +367,11 @@ impl OpBuilder<'_, Txs> { db.merge_transitions(BundleRetention::Reverts); let state_transition_merge_time = state_merge_start_time.elapsed(); - ctx.metrics - .state_transition_merge_duration - .record(state_transition_merge_time); - ctx.metrics - .state_transition_merge_gauge - .set(state_transition_merge_time); - - ctx.metrics - .payload_num_tx - .record(info.executed_transactions.len() as f64); - ctx.metrics - .payload_num_tx_gauge - .set(info.executed_transactions.len() as f64); + ctx.metrics.state_transition_merge_duration.record(state_transition_merge_time); + ctx.metrics.state_transition_merge_gauge.set(state_transition_merge_time); + + ctx.metrics.payload_num_tx.record(info.executed_transactions.len() as f64); + ctx.metrics.payload_num_tx_gauge.set(info.executed_transactions.len() as f64); let payload = ExecutedPayload { info }; @@ -434,10 +390,7 @@ impl OpBuilder<'_, Txs> { where BuilderTx: BuilderTransactions, { - let mut db = State::builder() - .with_database(state) - .with_bundle_update() - .build(); + let mut db = State::builder().with_database(state).with_bundle_update().build(); let ExecutedPayload { info } = match self.execute(&state_provider, &mut db, &ctx, builder_tx)? 
{ BuildOutcomeKind::Better { payload } | BuildOutcomeKind::Freeze(payload) => payload, @@ -448,12 +401,8 @@ impl OpBuilder<'_, Txs> { }; let block_number = ctx.block_number(); - let execution_outcome = ExecutionOutcome::new( - db.take_bundle(), - vec![info.receipts], - block_number, - Vec::new(), - ); + let execution_outcome = + ExecutionOutcome::new(db.take_bundle(), vec![info.receipts], block_number, Vec::new()); let receipts_root = execution_outcome .generic_receipts_root_slow(block_number, |receipts| { calculate_receipt_root_no_memo_optimism( @@ -463,33 +412,26 @@ impl OpBuilder<'_, Txs> { ) }) .expect("Number is in range"); - let logs_bloom = execution_outcome - .block_logs_bloom(block_number) - .expect("Number is in range"); + let logs_bloom = + execution_outcome.block_logs_bloom(block_number).expect("Number is in range"); // calculate the state root let state_root_start_time = Instant::now(); let hashed_state = state_provider.hashed_post_state(execution_outcome.state()); let (state_root, trie_output) = { - state_provider - .state_root_with_updates(hashed_state.clone()) - .inspect_err(|err| { - warn!(target: "payload_builder", - parent_header=%ctx.parent().hash(), - %err, - "failed to calculate state root for payload" - ); - })? + state_provider.state_root_with_updates(hashed_state.clone()).inspect_err(|err| { + warn!(target: "payload_builder", + parent_header=%ctx.parent().hash(), + %err, + "failed to calculate state root for payload" + ); + })? 
}; let state_root_calculation_time = state_root_start_time.elapsed(); - ctx.metrics - .state_root_calculation_duration - .record(state_root_calculation_time); - ctx.metrics - .state_root_calculation_gauge - .set(state_root_calculation_time); + ctx.metrics.state_root_calculation_duration.record(state_root_calculation_time); + ctx.metrics.state_root_calculation_gauge.set(state_root_calculation_time); let (withdrawals_root, requests_hash) = if ctx.is_isthmus_active() { // withdrawals root field in block header is used for storage root of L2 predeploy @@ -569,19 +511,11 @@ impl OpBuilder<'_, Txs> { let no_tx_pool = ctx.attributes().no_tx_pool; - let payload = OpBuiltPayload::new( - ctx.payload_id(), - sealed_block, - info.total_fees, - Some(executed), - ); + let payload = + OpBuiltPayload::new(ctx.payload_id(), sealed_block, info.total_fees, Some(executed)); - ctx.metrics - .payload_byte_size - .record(payload.block().size() as f64); - ctx.metrics - .payload_byte_size_gauge - .set(payload.block().size() as f64); + ctx.metrics.payload_byte_size.record(payload.block().size() as f64); + ctx.metrics.payload_byte_size_gauge.set(payload.block().size() as f64); if no_tx_pool { // if `no_tx_pool` is set only transactions from the payload attributes will be included diff --git a/crates/op-rbuilder/src/builders/standard/service.rs b/crates/op-rbuilder/src/builders/standard/service.rs index c713b69cb..bc1742a3d 100644 --- a/crates/op-rbuilder/src/builders/standard/service.rs +++ b/crates/op-rbuilder/src/builders/standard/service.rs @@ -53,8 +53,7 @@ impl StandardServiceBuilder { let (payload_service, payload_service_handle) = PayloadBuilderService::new(payload_generator, ctx.provider().canonical_state_stream()); - ctx.task_executor() - .spawn_critical("payload builder service", Box::pin(payload_service)); + ctx.task_executor().spawn_critical("payload builder service", Box::pin(payload_service)); Ok(payload_service_handle) } diff --git 
a/crates/op-rbuilder/src/flashtestations/args.rs b/crates/op-rbuilder/src/flashtestations/args.rs index cc4f36463..e9bc3cc02 100644 --- a/crates/op-rbuilder/src/flashtestations/args.rs +++ b/crates/op-rbuilder/src/flashtestations/args.rs @@ -16,11 +16,7 @@ pub struct FlashtestationsArgs { pub flashtestations_enabled: bool, /// Whether to use the debug HTTP service for quotes - #[arg( - long = "flashtestations.debug", - default_value = "false", - env = "FLASHTESTATIONS_DEBUG" - )] + #[arg(long = "flashtestations.debug", default_value = "false", env = "FLASHTESTATIONS_DEBUG")] pub debug: bool, // Debug url for attestations diff --git a/crates/op-rbuilder/src/flashtestations/attestation.rs b/crates/op-rbuilder/src/flashtestations/attestation.rs index e52f588ad..11b9967de 100644 --- a/crates/op-rbuilder/src/flashtestations/attestation.rs +++ b/crates/op-rbuilder/src/flashtestations/attestation.rs @@ -36,9 +36,7 @@ impl AttestationProvider for DebugAttestationProvider { info!(target: "flashtestations", url = url, "fetching quote in debug mode"); - let response = ureq::get(&url) - .timeout(std::time::Duration::from_secs(10)) - .call()?; + let response = ureq::get(&url).timeout(std::time::Duration::from_secs(10)).call()?; let mut body = Vec::new(); response.into_reader().read_to_end(&mut body)?; @@ -52,14 +50,10 @@ pub fn get_attestation_provider( ) -> Box { if config.debug { Box::new(DebugAttestationProvider::new( - config - .debug_url - .unwrap_or(DEBUG_QUOTE_SERVICE_URL.to_string()), + config.debug_url.unwrap_or(DEBUG_QUOTE_SERVICE_URL.to_string()), )) } else { // TODO: replace with real attestation provider - Box::new(DebugAttestationProvider::new( - DEBUG_QUOTE_SERVICE_URL.to_string(), - )) + Box::new(DebugAttestationProvider::new(DEBUG_QUOTE_SERVICE_URL.to_string())) } } diff --git a/crates/op-rbuilder/src/flashtestations/service.rs b/crates/op-rbuilder/src/flashtestations/service.rs index ffcd22767..9fdb0eb69 100644 --- 
a/crates/op-rbuilder/src/flashtestations/service.rs +++ b/crates/op-rbuilder/src/flashtestations/service.rs @@ -39,11 +39,7 @@ pub struct FlashtestationsService { impl FlashtestationsService { pub fn new(args: FlashtestationsArgs) -> Self { let (private_key, public_key, address) = generate_ethereum_keypair(); - let tee_service_signer = Signer { - address, - pubkey: public_key, - secret: private_key, - }; + let tee_service_signer = Signer { address, pubkey: public_key, secret: private_key }; let attestation_provider = Arc::new(get_attestation_provider(AttestationConfig { debug: args.debug, @@ -52,11 +48,9 @@ impl FlashtestationsService { let tx_manager = TxManager::new( tee_service_signer, - args.funding_key - .expect("funding key required when flashtestations enabled"), + args.funding_key.expect("funding key required when flashtestations enabled"), args.rpc_url, - args.registry_address - .expect("registry address required when flashtestations enabled"), + args.registry_address.expect("registry address required when flashtestations enabled"), args.builder_policy_address .expect("builder policy address required when flashtestations enabled"), args.builder_proof_version, @@ -81,9 +75,7 @@ impl FlashtestationsService { let attestation = self.attestation_provider.get_attestation(report_data)?; // Submit report onchain by registering the key of the tee service - self.tx_manager - .fund_and_register_tee_service(attestation, self.funding_amount) - .await + self.tx_manager.fund_and_register_tee_service(attestation, self.funding_amount).await } pub async fn clean_up(&self) -> eyre::Result<()> { @@ -120,22 +112,21 @@ where flashtestations_service.bootstrap().await?; let flashtestations_clone = flashtestations_service.clone(); - ctx.task_executor() - .spawn_critical_with_graceful_shutdown_signal( - "flashtestations clean up task", - |shutdown| { - Box::pin(async move { - let graceful_guard = shutdown.await; - if let Err(e) = flashtestations_clone.clean_up().await { - warn!( - 
error = %e, - "Failed to complete clean up for flashtestations service", - ) - }; - drop(graceful_guard) - }) - }, - ); + ctx.task_executor().spawn_critical_with_graceful_shutdown_signal( + "flashtestations clean up task", + |shutdown| { + Box::pin(async move { + let graceful_guard = shutdown.await; + if let Err(e) = flashtestations_clone.clean_up().await { + warn!( + error = %e, + "Failed to complete clean up for flashtestations service", + ) + }; + drop(graceful_guard) + }) + }, + ); Ok(FlashtestationsBuilderTx {}) } @@ -148,7 +139,8 @@ mod tests { use crate::tx_signer::public_key_to_address; - /// Derives Ethereum address from report data using the same logic as the Solidity contract + /// Derives Ethereum address from report data using the same logic as the + /// Solidity contract fn derive_ethereum_address_from_report_data(pubkey_64_bytes: &[u8]) -> Address { // This exactly matches the Solidity implementation: // address(uint160(uint256(keccak256(reportData)))) @@ -178,9 +170,6 @@ mod tests { let report_data = &pubkey_bytes[1..65]; // Skip 0x04 prefix let manual_address = derive_ethereum_address_from_report_data(report_data); - assert_eq!( - our_address, manual_address, - "Address derivation should match" - ); + assert_eq!(our_address, manual_address, "Address derivation should match"); } } diff --git a/crates/op-rbuilder/src/flashtestations/tx_manager.rs b/crates/op-rbuilder/src/flashtestations/tx_manager.rs index 9eabdd28a..a6d44cc4b 100644 --- a/crates/op-rbuilder/src/flashtestations/tx_manager.rs +++ b/crates/op-rbuilder/src/flashtestations/tx_manager.rs @@ -107,12 +107,8 @@ impl TxManager { funding_amount: U256, ) -> eyre::Result<()> { info!(target: "flashtestations", "funding TEE address at {}", self.tee_service_signer.address); - self.fund_address( - self.funding_signer, - self.tee_service_signer.address, - funding_amount, - ) - .await?; + self.fund_address(self.funding_signer, self.tee_service_signer.address, funding_amount) + .await?; let quote_bytes 
= Bytes::from(attestation); let wallet = @@ -129,10 +125,8 @@ impl TxManager { info!(target: "flashtestations", "submitting quote to registry at {}", self.registry_address); // TODO: add retries - let calldata = IFlashtestationRegistry::registerTEEServiceCall { - rawQuote: quote_bytes, - } - .abi_encode(); + let calldata = + IFlashtestationRegistry::registerTEEServiceCall { rawQuote: quote_bytes }.abi_encode(); let tx = TransactionRequest { from: Some(self.tee_service_signer.address), to: Some(TxKind::Call(self.registry_address)), @@ -190,9 +184,7 @@ impl TxManager { .network::() .connect(self.rpc_url.as_str()) .await?; - let balance = provider - .get_balance(self.tee_service_signer.address) - .await?; + let balance = provider.get_balance(self.tee_service_signer.address).await?; let gas_estimate = 21_000u128; let gas_price = provider.get_gas_price().await?; let gas_cost = U256::from(gas_estimate * gas_price); @@ -207,7 +199,8 @@ impl TxManager { Ok(()) } - /// Processes a pending transaction and logs whether the transaction succeeded or not + /// Processes a pending transaction and logs whether the transaction + /// succeeded or not async fn process_pending_tx( pending_tx_result: TransportResult>, ) -> eyre::Result { @@ -217,11 +210,7 @@ impl TxManager { debug!(target: "flashtestations", tx_hash = %tx_hash, "transaction submitted"); // Wait for funding transaction confirmation - match pending_tx - .with_timeout(Some(Duration::from_secs(30))) - .get_receipt() - .await - { + match pending_tx.with_timeout(Some(Duration::from_secs(30))).get_receipt().await { Ok(receipt) => { if receipt.status() { Ok(receipt.transaction_hash()) @@ -237,7 +226,8 @@ impl TxManager { } /// Computes the block content hash according to the formula: - /// keccak256(abi.encode(parentHash, blockNumber, timestamp, transactionHashes)) + /// keccak256(abi.encode(parentHash, blockNumber, timestamp, + /// transactionHashes)) fn compute_block_content_hash(payload: OpBuiltPayload) -> B256 { let block 
= payload.block(); let body = block.clone().into_body(); diff --git a/crates/op-rbuilder/src/gas_limiter/args.rs b/crates/op-rbuilder/src/gas_limiter/args.rs index ec7f8008b..2896c4894 100644 --- a/crates/op-rbuilder/src/gas_limiter/args.rs +++ b/crates/op-rbuilder/src/gas_limiter/args.rs @@ -7,19 +7,11 @@ pub struct GasLimiterArgs { pub gas_limiter_enabled: bool, /// Maximum gas per address in token bucket. Defaults to 10 million gas. - #[arg( - long = "gas-limiter.max-gas-per-address", - env, - default_value = "10000000" - )] + #[arg(long = "gas-limiter.max-gas-per-address", env, default_value = "10000000")] pub max_gas_per_address: u64, /// Gas refill rate per block. Defaults to 1 million gas per block. - #[arg( - long = "gas-limiter.refill-rate-per-block", - env, - default_value = "1000000" - )] + #[arg(long = "gas-limiter.refill-rate-per-block", env, default_value = "1000000")] pub refill_rate_per_block: u64, /// How many blocks to wait before cleaning up stale buckets for addresses. 
diff --git a/crates/op-rbuilder/src/gas_limiter/error.rs b/crates/op-rbuilder/src/gas_limiter/error.rs index a85b7f77c..a42d3f00d 100644 --- a/crates/op-rbuilder/src/gas_limiter/error.rs +++ b/crates/op-rbuilder/src/gas_limiter/error.rs @@ -5,9 +5,5 @@ pub enum GasLimitError { #[error( "Address {address} exceeded gas limit: {requested} gwei requested, {available} gwei available" )] - AddressLimitExceeded { - address: Address, - requested: u64, - available: u64, - }, + AddressLimitExceeded { address: Address, requested: u64, available: u64 }, } diff --git a/crates/op-rbuilder/src/gas_limiter/metrics.rs b/crates/op-rbuilder/src/gas_limiter/metrics.rs index f78986570..0eb3963b6 100644 --- a/crates/op-rbuilder/src/gas_limiter/metrics.rs +++ b/crates/op-rbuilder/src/gas_limiter/metrics.rs @@ -40,8 +40,7 @@ impl GasLimiterMetrics { } pub(super) fn record_refresh(&self, removed_addresses: usize, duration: Duration) { - self.active_address_count - .decrement(removed_addresses as f64); + self.active_address_count.decrement(removed_addresses as f64); self.refresh_duration.record(duration); } } diff --git a/crates/op-rbuilder/src/gas_limiter/mod.rs b/crates/op-rbuilder/src/gas_limiter/mod.rs index 327a6749d..33957c544 100644 --- a/crates/op-rbuilder/src/gas_limiter/mod.rs +++ b/crates/op-rbuilder/src/gas_limiter/mod.rs @@ -31,9 +31,7 @@ struct TokenBucket { impl AddressGasLimiter { pub fn new(config: GasLimiterArgs) -> Self { - Self { - inner: AddressGasLimiterInner::try_new(config), - } + Self { inner: AddressGasLimiterInner::try_new(config) } } /// Check if there's enough gas for this address and consume it. 
Returns @@ -60,11 +58,7 @@ impl AddressGasLimiterInner { return None; } - Some(Self { - config, - address_buckets: Default::default(), - metrics: Default::default(), - }) + Some(Self { config, address_buckets: Default::default(), metrics: Default::default() }) } fn consume_gas_inner( @@ -108,16 +102,13 @@ impl AddressGasLimiterInner { let active_addresses = self.address_buckets.len(); self.address_buckets.iter_mut().for_each(|mut bucket| { - bucket.available = min( - bucket.capacity, - bucket.available + self.config.refill_rate_per_block, - ) + bucket.available = + min(bucket.capacity, bucket.available + self.config.refill_rate_per_block) }); // Only clean up stale buckets every `cleanup_interval` blocks - if block_number % self.config.cleanup_interval == 0 { - self.address_buckets - .retain(|_, bucket| bucket.available <= bucket.capacity); + if block_number.is_multiple_of(self.config.cleanup_interval) { + self.address_buckets.retain(|_, bucket| bucket.available <= bucket.capacity); } active_addresses - self.address_buckets.len() @@ -127,17 +118,13 @@ impl AddressGasLimiterInner { let start = Instant::now(); let removed_addresses = self.refresh_inner(block_number); - self.metrics - .record_refresh(removed_addresses, start.elapsed()); + self.metrics.record_refresh(removed_addresses, start.elapsed()); } } impl TokenBucket { fn new(capacity: u64) -> Self { - Self { - capacity, - available: capacity, - } + Self { capacity, available: capacity } } } diff --git a/crates/op-rbuilder/src/launcher.rs b/crates/op-rbuilder/src/launcher.rs index 08a541b65..124b939f6 100644 --- a/crates/op-rbuilder/src/launcher.rs +++ b/crates/op-rbuilder/src/launcher.rs @@ -70,9 +70,7 @@ where B: PayloadBuilder, { pub fn new() -> Self { - Self { - _builder: PhantomData, - } + Self { _builder: PhantomData } } } @@ -130,10 +128,11 @@ where .pool( OpPoolBuilder::::default() .with_enable_tx_conditional( - // Revert protection uses the same internal pool logic as conditional transactions + // 
Revert protection uses the same internal pool logic as + // conditional transactions // to garbage collect transactions out of the bundle range. - rollup_args.enable_tx_conditional - || builder_args.enable_revert_protection, + rollup_args.enable_tx_conditional || + builder_args.enable_revert_protection, ) .with_supervisor( rollup_args.supervisor_http.clone(), @@ -156,8 +155,7 @@ where reverted_cache, ); - ctx.modules - .add_or_replace_configured(revert_protection_ext.into_rpc())?; + ctx.modules.add_or_replace_configured(revert_protection_ext.into_rpc())?; } Ok(()) diff --git a/crates/op-rbuilder/src/metrics.rs b/crates/op-rbuilder/src/metrics.rs index df13bd1da..7120b10c7 100644 --- a/crates/op-rbuilder/src/metrics.rs +++ b/crates/op-rbuilder/src/metrics.rs @@ -111,13 +111,17 @@ pub struct OpRBuilderMetrics { pub payload_num_tx: Histogram, /// Latest number of transactions in the payload pub payload_num_tx_gauge: Gauge, - /// Histogram of transactions in the payload that were successfully simulated + /// Histogram of transactions in the payload that were successfully + /// simulated pub payload_num_tx_simulated: Histogram, - /// Latest number of transactions in the payload that were successfully simulated + /// Latest number of transactions in the payload that were successfully + /// simulated pub payload_num_tx_simulated_gauge: Gauge, - /// Histogram of transactions in the payload that were successfully simulated + /// Histogram of transactions in the payload that were successfully + /// simulated pub payload_num_tx_simulated_success: Histogram, - /// Latest number of transactions in the payload that were successfully simulated + /// Latest number of transactions in the payload that were successfully + /// simulated pub payload_num_tx_simulated_success_gauge: Gauge, /// Histogram of transactions in the payload that failed simulation pub payload_num_tx_simulated_fail: Histogram, @@ -133,7 +137,8 @@ pub struct OpRBuilderMetrics { pub da_tx_size_limit: Gauge, /// How 
much less flashblocks we issue to be on time with block construction pub reduced_flashblocks_number: Histogram, - /// How much less flashblocks we issued in reality, comparing to calculated number for block + /// How much less flashblocks we issued in reality, comparing to calculated + /// number for block pub missing_flashblocks_count: Histogram, /// How much time we have deducted from block building time pub flashblocks_time_drift: Histogram, @@ -161,22 +166,16 @@ impl OpRBuilderMetrics { num_txs_simulated_fail: impl IntoF64 + Copy, num_bundles_reverted: impl IntoF64, ) { - self.payload_tx_simulation_duration - .record(payload_tx_simulation_time); - self.payload_tx_simulation_gauge - .set(payload_tx_simulation_time); + self.payload_tx_simulation_duration.record(payload_tx_simulation_time); + self.payload_tx_simulation_gauge.set(payload_tx_simulation_time); self.payload_num_tx_considered.record(num_txs_considered); self.payload_num_tx_considered_gauge.set(num_txs_considered); self.payload_num_tx_simulated.record(num_txs_simulated); self.payload_num_tx_simulated_gauge.set(num_txs_simulated); - self.payload_num_tx_simulated_success - .record(num_txs_simulated_success); - self.payload_num_tx_simulated_success_gauge - .set(num_txs_simulated_success); - self.payload_num_tx_simulated_fail - .record(num_txs_simulated_fail); - self.payload_num_tx_simulated_fail_gauge - .set(num_txs_simulated_fail); + self.payload_num_tx_simulated_success.record(num_txs_simulated_success); + self.payload_num_tx_simulated_success_gauge.set(num_txs_simulated_success); + self.payload_num_tx_simulated_fail.record(num_txs_simulated_fail); + self.payload_num_tx_simulated_fail_gauge.set(num_txs_simulated_fail); self.bundles_reverted.record(num_bundles_reverted); } } diff --git a/crates/op-rbuilder/src/mock_tx.rs b/crates/op-rbuilder/src/mock_tx.rs index 3802fa9f8..f1048851a 100644 --- a/crates/op-rbuilder/src/mock_tx.rs +++ b/crates/op-rbuilder/src/mock_tx.rs @@ -40,12 +40,14 @@ impl 
MockFbTransactionFactory { self.validated_with_origin(TransactionOrigin::External, transaction) } - /// Validates a [`MockTransaction`] and returns a shared [`Arc`]. + /// Validates a [`MockTransaction`] and returns a shared + /// [`Arc`]. pub fn validated_arc(&mut self, transaction: MockFbTransaction) -> Arc { Arc::new(self.validated(transaction)) } - /// Converts the transaction into a validated transaction with a specified origin. + /// Converts the transaction into a validated transaction with a specified + /// origin. pub fn validated_with_origin( &mut self, origin: TransactionOrigin, @@ -114,8 +116,8 @@ pub struct MockFbTransaction { pub flashblock_number_max: Option, } -/// A validated transaction in the transaction pool, using [`MockTransaction`] as the transaction -/// type. +/// A validated transaction in the transaction pool, using [`MockTransaction`] +/// as the transaction type. /// /// This type is an alias for [`ValidPoolTransaction`]. pub type MockValidFbTx = ValidPoolTransaction; @@ -158,11 +160,11 @@ impl PoolTransaction for MockFbTransaction { // not to be manually set. fn cost(&self) -> &U256 { match &self.inner { - MockTransaction::Legacy { cost, .. } - | MockTransaction::Eip2930 { cost, .. } - | MockTransaction::Eip1559 { cost, .. } - | MockTransaction::Eip4844 { cost, .. } - | MockTransaction::Eip7702 { cost, .. } => cost, + MockTransaction::Legacy { cost, .. } | + MockTransaction::Eip2930 { cost, .. } | + MockTransaction::Eip1559 { cost, .. } | + MockTransaction::Eip4844 { cost, .. } | + MockTransaction::Eip7702 { cost, .. } => cost, } } @@ -194,10 +196,10 @@ impl alloy_consensus::Transaction for MockFbTransaction { fn chain_id(&self) -> Option { match &self.inner { MockTransaction::Legacy { chain_id, .. } => *chain_id, - MockTransaction::Eip1559 { chain_id, .. } - | MockTransaction::Eip4844 { chain_id, .. } - | MockTransaction::Eip2930 { chain_id, .. } - | MockTransaction::Eip7702 { chain_id, .. 
} => Some(*chain_id), + MockTransaction::Eip1559 { chain_id, .. } | + MockTransaction::Eip4844 { chain_id, .. } | + MockTransaction::Eip2930 { chain_id, .. } | + MockTransaction::Eip7702 { chain_id, .. } => Some(*chain_id), } } @@ -211,72 +213,47 @@ impl alloy_consensus::Transaction for MockFbTransaction { fn gas_price(&self) -> Option { match &self.inner { - MockTransaction::Legacy { gas_price, .. } - | MockTransaction::Eip2930 { gas_price, .. } => Some(*gas_price), + MockTransaction::Legacy { gas_price, .. } | + MockTransaction::Eip2930 { gas_price, .. } => Some(*gas_price), _ => None, } } fn max_fee_per_gas(&self) -> u128 { match &self.inner { - MockTransaction::Legacy { gas_price, .. } - | MockTransaction::Eip2930 { gas_price, .. } => *gas_price, - MockTransaction::Eip1559 { - max_fee_per_gas, .. - } - | MockTransaction::Eip4844 { - max_fee_per_gas, .. - } - | MockTransaction::Eip7702 { - max_fee_per_gas, .. - } => *max_fee_per_gas, + MockTransaction::Legacy { gas_price, .. } | + MockTransaction::Eip2930 { gas_price, .. } => *gas_price, + MockTransaction::Eip1559 { max_fee_per_gas, .. } | + MockTransaction::Eip4844 { max_fee_per_gas, .. } | + MockTransaction::Eip7702 { max_fee_per_gas, .. } => *max_fee_per_gas, } } fn max_priority_fee_per_gas(&self) -> Option { match &self.inner { MockTransaction::Legacy { .. } | MockTransaction::Eip2930 { .. } => None, - MockTransaction::Eip1559 { - max_priority_fee_per_gas, - .. - } - | MockTransaction::Eip4844 { - max_priority_fee_per_gas, - .. + MockTransaction::Eip1559 { max_priority_fee_per_gas, .. } | + MockTransaction::Eip4844 { max_priority_fee_per_gas, .. } | + MockTransaction::Eip7702 { max_priority_fee_per_gas, .. } => { + Some(*max_priority_fee_per_gas) } - | MockTransaction::Eip7702 { - max_priority_fee_per_gas, - .. - } => Some(*max_priority_fee_per_gas), } } fn max_fee_per_blob_gas(&self) -> Option { match &self.inner { - MockTransaction::Eip4844 { - max_fee_per_blob_gas, - .. 
- } => Some(*max_fee_per_blob_gas), + MockTransaction::Eip4844 { max_fee_per_blob_gas, .. } => Some(*max_fee_per_blob_gas), _ => None, } } fn priority_fee_or_price(&self) -> u128 { match &self.inner { - MockTransaction::Legacy { gas_price, .. } - | MockTransaction::Eip2930 { gas_price, .. } => *gas_price, - MockTransaction::Eip1559 { - max_priority_fee_per_gas, - .. - } - | MockTransaction::Eip4844 { - max_priority_fee_per_gas, - .. - } - | MockTransaction::Eip7702 { - max_priority_fee_per_gas, - .. - } => *max_priority_fee_per_gas, + MockTransaction::Legacy { gas_price, .. } | + MockTransaction::Eip2930 { gas_price, .. } => *gas_price, + MockTransaction::Eip1559 { max_priority_fee_per_gas, .. } | + MockTransaction::Eip4844 { max_priority_fee_per_gas, .. } | + MockTransaction::Eip7702 { max_priority_fee_per_gas, .. } => *max_priority_fee_per_gas, } } @@ -302,17 +279,14 @@ impl alloy_consensus::Transaction for MockFbTransaction { } fn is_dynamic_fee(&self) -> bool { - !matches!( - self.inner, - MockTransaction::Legacy { .. } | MockTransaction::Eip2930 { .. } - ) + !matches!(self.inner, MockTransaction::Legacy { .. } | MockTransaction::Eip2930 { .. }) } fn kind(&self) -> TxKind { match &self.inner { - MockTransaction::Legacy { to, .. } - | MockTransaction::Eip1559 { to, .. } - | MockTransaction::Eip2930 { to, .. } => *to, + MockTransaction::Legacy { to, .. } | + MockTransaction::Eip1559 { to, .. } | + MockTransaction::Eip2930 { to, .. } => *to, MockTransaction::Eip4844 { to, .. } | MockTransaction::Eip7702 { to, .. } => { TxKind::Call(*to) } @@ -321,20 +295,20 @@ impl alloy_consensus::Transaction for MockFbTransaction { fn is_create(&self) -> bool { match &self.inner { - MockTransaction::Legacy { to, .. } - | MockTransaction::Eip1559 { to, .. } - | MockTransaction::Eip2930 { to, .. } => to.is_create(), + MockTransaction::Legacy { to, .. } | + MockTransaction::Eip1559 { to, .. } | + MockTransaction::Eip2930 { to, .. } => to.is_create(), MockTransaction::Eip4844 { .. 
} | MockTransaction::Eip7702 { .. } => false, } } fn value(&self) -> U256 { match &self.inner { - MockTransaction::Legacy { value, .. } - | MockTransaction::Eip1559 { value, .. } - | MockTransaction::Eip2930 { value, .. } - | MockTransaction::Eip4844 { value, .. } - | MockTransaction::Eip7702 { value, .. } => *value, + MockTransaction::Legacy { value, .. } | + MockTransaction::Eip1559 { value, .. } | + MockTransaction::Eip2930 { value, .. } | + MockTransaction::Eip4844 { value, .. } | + MockTransaction::Eip7702 { value, .. } => *value, } } @@ -345,40 +319,23 @@ impl alloy_consensus::Transaction for MockFbTransaction { fn access_list(&self) -> Option<&AccessList> { match &self.inner { MockTransaction::Legacy { .. } => None, - MockTransaction::Eip1559 { - access_list: accesslist, - .. - } - | MockTransaction::Eip4844 { - access_list: accesslist, - .. - } - | MockTransaction::Eip2930 { - access_list: accesslist, - .. - } - | MockTransaction::Eip7702 { - access_list: accesslist, - .. - } => Some(accesslist), + MockTransaction::Eip1559 { access_list: accesslist, .. } | + MockTransaction::Eip4844 { access_list: accesslist, .. } | + MockTransaction::Eip2930 { access_list: accesslist, .. } | + MockTransaction::Eip7702 { access_list: accesslist, .. } => Some(accesslist), } } fn blob_versioned_hashes(&self) -> Option<&[B256]> { match &self.inner { - MockTransaction::Eip4844 { - blob_versioned_hashes, - .. - } => Some(blob_versioned_hashes), + MockTransaction::Eip4844 { blob_versioned_hashes, .. } => Some(blob_versioned_hashes), _ => None, } } fn authorization_list(&self) -> Option<&[SignedAuthorization]> { match &self.inner { - MockTransaction::Eip7702 { - authorization_list, .. - } => Some(authorization_list), + MockTransaction::Eip7702 { authorization_list, .. 
} => Some(authorization_list), _ => None, } } @@ -422,9 +379,7 @@ impl EthPoolTransaction for MockFbTransaction { ) -> Result<(), alloy_eips::eip4844::BlobTransactionValidationError> { match &self.inner { MockTransaction::Eip4844 { .. } => Ok(()), - _ => Err(BlobTransactionValidationError::NotBlobTransaction( - self.inner.tx_type(), - )), + _ => Err(BlobTransactionValidationError::NotBlobTransaction(self.inner.tx_type())), } } } diff --git a/crates/op-rbuilder/src/monitor_tx_pool.rs b/crates/op-rbuilder/src/monitor_tx_pool.rs index a9cbbae68..28d98484c 100644 --- a/crates/op-rbuilder/src/monitor_tx_pool.rs +++ b/crates/op-rbuilder/src/monitor_tx_pool.rs @@ -35,20 +35,14 @@ async fn transaction_event_log( "Transaction event received" ) } - FullTransactionEvent::Mined { - tx_hash, - block_hash, - } => info!( + FullTransactionEvent::Mined { tx_hash, block_hash } => info!( target = "monitoring", tx_hash = tx_hash.to_string(), kind = "mined", block_hash = block_hash.to_string(), "Transaction event received" ), - FullTransactionEvent::Replaced { - transaction, - replaced_by, - } => info!( + FullTransactionEvent::Replaced { transaction, replaced_by } => info!( target = "monitoring", tx_hash = transaction.hash().to_string(), kind = "replaced", diff --git a/crates/op-rbuilder/src/primitives/bundle.rs b/crates/op-rbuilder/src/primitives/bundle.rs index 3c415ad5c..a8ecf5bbc 100644 --- a/crates/op-rbuilder/src/primitives/bundle.rs +++ b/crates/op-rbuilder/src/primitives/bundle.rs @@ -103,11 +103,7 @@ pub struct Bundle { /// builder node's clock, which may not be perfectly synchronized with /// network time. Block number constraints are preferred for deterministic /// behavior. - #[serde( - default, - rename = "minTimestamp", - skip_serializing_if = "Option::is_none" - )] + #[serde(default, rename = "minTimestamp", skip_serializing_if = "Option::is_none")] pub min_timestamp: Option, /// Maximum timestamp (Unix epoch seconds) for bundle inclusion. 
@@ -116,11 +112,7 @@ pub struct Bundle { /// builder node's clock, which may not be perfectly synchronized with /// network time. Block number constraints are preferred for deterministic /// behavior. - #[serde( - default, - rename = "maxTimestamp", - skip_serializing_if = "Option::is_none" - )] + #[serde(default, rename = "maxTimestamp", skip_serializing_if = "Option::is_none")] pub max_timestamp: Option, } @@ -142,11 +134,7 @@ pub enum BundleConditionalError { #[error( "block_number_max ({max}) is too high (current: {current}, max allowed: {max_allowed})" )] - MaxBlockTooHigh { - max: u64, - current: u64, - max_allowed: u64, - }, + MaxBlockTooHigh { max: u64, current: u64, max_allowed: u64 }, /// When no explicit maximum block number is provided, the system uses /// `current_block + MAX_BLOCK_RANGE_BLOCKS` as the default maximum. This /// error occurs when the specified minimum exceeds this default maximum. @@ -175,10 +163,10 @@ impl Bundle { // Validate block number ranges if let Some(max) = block_number_max { // Check if min > max - if let Some(min) = block_number_min { - if min > max { - return Err(BundleConditionalError::MinGreaterThanMax { min, max }); - } + if let Some(min) = block_number_min && + min > max + { + return Err(BundleConditionalError::MinGreaterThanMax { min, max }); } // The max block cannot be a past block @@ -204,23 +192,22 @@ impl Bundle { block_number_max = Some(default_max); // Ensure that the new max is not smaller than the min - if let Some(min) = block_number_min { - if min > default_max { - return Err(BundleConditionalError::MinTooHighForDefaultRange { - min, - max_allowed: default_max, - }); - } + if let Some(min) = block_number_min && + min > default_max + { + return Err(BundleConditionalError::MinTooHighForDefaultRange { + min, + max_allowed: default_max, + }); } } // Validate flashblock number range - if let Some(min) = self.flashblock_number_min { - if let Some(max) = self.flashblock_number_max { - if min > max { - return 
Err(BundleConditionalError::FlashblockMinGreaterThanMax { min, max }); - } - } + if let Some(min) = self.flashblock_number_min && + let Some(max) = self.flashblock_number_max && + min > max + { + return Err(BundleConditionalError::FlashblockMinGreaterThanMax { min, max }); } Ok(BundleConditional { @@ -259,22 +246,13 @@ mod tests { #[test] fn test_bundle_conditional_no_bounds() { - let bundle = Bundle { - transactions: vec![], - ..Default::default() - }; + let bundle = Bundle { transactions: vec![], ..Default::default() }; let last_block = 1000; - let result = bundle - .conditional(last_block) - .unwrap() - .transaction_conditional; + let result = bundle.conditional(last_block).unwrap().transaction_conditional; assert_eq!(result.block_number_min, None); - assert_eq!( - result.block_number_max, - Some(last_block + MAX_BLOCK_RANGE_BLOCKS) - ); + assert_eq!(result.block_number_max, Some(last_block + MAX_BLOCK_RANGE_BLOCKS)); } #[test] @@ -286,10 +264,7 @@ mod tests { }; let last_block = 1000; - let result = bundle - .conditional(last_block) - .unwrap() - .transaction_conditional; + let result = bundle.conditional(last_block).unwrap().transaction_conditional; assert_eq!(result.block_number_min, Some(1002)); assert_eq!(result.block_number_max, Some(1005)); @@ -308,38 +283,26 @@ mod tests { assert!(matches!( result, - Err(BundleConditionalError::MinGreaterThanMax { - min: 1010, - max: 1005 - }) + Err(BundleConditionalError::MinGreaterThanMax { min: 1010, max: 1005 }) )); } #[test] fn test_bundle_conditional_max_in_past() { - let bundle = Bundle { - block_number_max: Some(999), - ..Default::default() - }; + let bundle = Bundle { block_number_max: Some(999), ..Default::default() }; let last_block = 1000; let result = bundle.conditional(last_block); assert!(matches!( result, - Err(BundleConditionalError::MaxBlockInPast { - max: 999, - current: 1000 - }) + Err(BundleConditionalError::MaxBlockInPast { max: 999, current: 1000 }) )); } #[test] fn 
test_bundle_conditional_max_too_high() { - let bundle = Bundle { - block_number_max: Some(1020), - ..Default::default() - }; + let bundle = Bundle { block_number_max: Some(1020), ..Default::default() }; let last_block = 1000; let result = bundle.conditional(last_block); @@ -356,35 +319,23 @@ mod tests { #[test] fn test_bundle_conditional_min_too_high_for_default_range() { - let bundle = Bundle { - block_number_min: Some(1015), - ..Default::default() - }; + let bundle = Bundle { block_number_min: Some(1015), ..Default::default() }; let last_block = 1000; let result = bundle.conditional(last_block); assert!(matches!( result, - Err(BundleConditionalError::MinTooHighForDefaultRange { - min: 1015, - max_allowed: 1010 - }) + Err(BundleConditionalError::MinTooHighForDefaultRange { min: 1015, max_allowed: 1010 }) )); } #[test] fn test_bundle_conditional_with_only_min() { - let bundle = Bundle { - block_number_min: Some(1005), - ..Default::default() - }; + let bundle = Bundle { block_number_min: Some(1005), ..Default::default() }; let last_block = 1000; - let result = bundle - .conditional(last_block) - .unwrap() - .transaction_conditional; + let result = bundle.conditional(last_block).unwrap().transaction_conditional; assert_eq!(result.block_number_min, Some(1005)); assert_eq!(result.block_number_max, Some(1010)); // last_block + MAX_BLOCK_RANGE_BLOCKS @@ -392,16 +343,10 @@ mod tests { #[test] fn test_bundle_conditional_with_only_max() { - let bundle = Bundle { - block_number_max: Some(1008), - ..Default::default() - }; + let bundle = Bundle { block_number_max: Some(1008), ..Default::default() }; let last_block = 1000; - let result = bundle - .conditional(last_block) - .unwrap() - .transaction_conditional; + let result = bundle.conditional(last_block).unwrap().transaction_conditional; assert_eq!(result.block_number_min, None); assert_eq!(result.block_number_max, Some(1008)); @@ -409,16 +354,10 @@ mod tests { #[test] fn test_bundle_conditional_min_lower_than_last_block() { 
- let bundle = Bundle { - block_number_min: Some(999), - ..Default::default() - }; + let bundle = Bundle { block_number_min: Some(999), ..Default::default() }; let last_block = 1000; - let result = bundle - .conditional(last_block) - .unwrap() - .transaction_conditional; + let result = bundle.conditional(last_block).unwrap().transaction_conditional; assert_eq!(result.block_number_min, Some(999)); assert_eq!(result.block_number_max, Some(1010)); @@ -458,10 +397,7 @@ mod tests { #[test] fn test_bundle_conditional_with_only_flashblock_min() { - let bundle = Bundle { - flashblock_number_min: Some(100), - ..Default::default() - }; + let bundle = Bundle { flashblock_number_min: Some(100), ..Default::default() }; let last_block = 1000; let result = bundle.conditional(last_block).unwrap(); @@ -472,10 +408,7 @@ mod tests { #[test] fn test_bundle_conditional_with_only_flashblock_max() { - let bundle = Bundle { - flashblock_number_max: Some(105), - ..Default::default() - }; + let bundle = Bundle { flashblock_number_max: Some(105), ..Default::default() }; let last_block = 1000; let result = bundle.conditional(last_block).unwrap(); diff --git a/crates/op-rbuilder/src/primitives/reth/engine_api_builder.rs b/crates/op-rbuilder/src/primitives/reth/engine_api_builder.rs index aaa733ca0..be390ddd7 100644 --- a/crates/op-rbuilder/src/primitives/reth/engine_api_builder.rs +++ b/crates/op-rbuilder/src/primitives/reth/engine_api_builder.rs @@ -41,9 +41,7 @@ where EV: Default, { fn default() -> Self { - Self { - engine_validator_builder: EV::default(), - } + Self { engine_validator_builder: EV::default() } } } @@ -56,9 +54,7 @@ where type EngineApi = OpEngineApiExt; async fn build_engine_api(self, ctx: &AddOnsContext<'_, N>) -> eyre::Result { - let Self { - engine_validator_builder, - } = self; + let Self { engine_validator_builder } = self; let engine_validator = engine_validator_builder.build(ctx).await?; let client = ClientVersionV1 { @@ -117,9 +113,7 @@ where versioned_hashes: Vec, 
parent_beacon_block_root: B256, ) -> RpcResult { - self.inner - .new_payload_v3(payload, versioned_hashes, parent_beacon_block_root) - .await + self.inner.new_payload_v3(payload, versioned_hashes, parent_beacon_block_root).await } async fn new_payload_v4( @@ -130,12 +124,7 @@ where execution_requests: Requests, ) -> RpcResult { self.inner - .new_payload_v4( - payload, - versioned_hashes, - parent_beacon_block_root, - execution_requests, - ) + .new_payload_v4(payload, versioned_hashes, parent_beacon_block_root, execution_requests) .await } @@ -144,9 +133,7 @@ where fork_choice_state: ForkchoiceState, payload_attributes: Option, ) -> RpcResult { - self.inner - .fork_choice_updated_v1(fork_choice_state, payload_attributes) - .await + self.inner.fork_choice_updated_v1(fork_choice_state, payload_attributes).await } async fn fork_choice_updated_v2( @@ -154,9 +141,7 @@ where fork_choice_state: ForkchoiceState, payload_attributes: Option, ) -> RpcResult { - self.inner - .fork_choice_updated_v2(fork_choice_state, payload_attributes) - .await + self.inner.fork_choice_updated_v2(fork_choice_state, payload_attributes).await } async fn fork_choice_updated_v3( @@ -164,9 +149,7 @@ where fork_choice_state: ForkchoiceState, payload_attributes: Option, ) -> RpcResult { - self.inner - .fork_choice_updated_v3(fork_choice_state, payload_attributes) - .await + self.inner.fork_choice_updated_v3(fork_choice_state, payload_attributes).await } async fn get_payload_v2( @@ -202,9 +185,7 @@ where start: U64, count: U64, ) -> RpcResult { - self.inner - .get_payload_bodies_by_range_v1(start, count) - .await + self.inner.get_payload_bodies_by_range_v1(start, count).await } async fn signal_superchain_v1(&self, signal: SuperchainSignal) -> RpcResult { @@ -241,7 +222,8 @@ where /// #[rpc(server, namespace = "engine", server_bounds(Engine::PayloadAttributes: jsonrpsee::core::DeserializeOwned))] pub trait OpRbuilderEngineApi { - /// Sends the given payload to the execution layer client, as specified 
for the Shanghai fork. + /// Sends the given payload to the execution layer client, as specified for + /// the Shanghai fork. /// /// See also /// @@ -249,7 +231,8 @@ pub trait OpRbuilderEngineApi { #[method(name = "newPayloadV2")] async fn new_payload_v2(&self, payload: ExecutionPayloadInputV2) -> RpcResult; - /// Sends the given payload to the execution layer client, as specified for the Cancun fork. + /// Sends the given payload to the execution layer client, as specified for + /// the Cancun fork. /// /// See also /// @@ -267,7 +250,8 @@ pub trait OpRbuilderEngineApi { parent_beacon_block_root: B256, ) -> RpcResult; - /// Sends the given payload to the execution layer client, as specified for the Prague fork. + /// Sends the given payload to the execution layer client, as specified for + /// the Prague fork. /// /// See also /// @@ -286,7 +270,8 @@ pub trait OpRbuilderEngineApi { /// /// This exists because it is used by op-node: /// - /// Caution: This should not accept the `withdrawals` field in the payload attributes. + /// Caution: This should not accept the `withdrawals` field in the payload + /// attributes. #[method(name = "forkchoiceUpdatedV1")] async fn fork_choice_updated_v1( &self, @@ -294,10 +279,11 @@ pub trait OpRbuilderEngineApi { payload_attributes: Option, ) -> RpcResult; - /// Updates the execution layer client with the given fork choice, as specified for the Shanghai - /// fork. + /// Updates the execution layer client with the given fork choice, as + /// specified for the Shanghai fork. /// - /// Caution: This should not accept the `parentBeaconBlockRoot` field in the payload attributes. + /// Caution: This should not accept the `parentBeaconBlockRoot` field in the + /// payload attributes. /// /// See also /// @@ -310,8 +296,8 @@ pub trait OpRbuilderEngineApi { payload_attributes: Option, ) -> RpcResult; - /// Updates the execution layer client with the given fork choice, as specified for the Cancun - /// fork. 
+ /// Updates the execution layer client with the given fork choice, as + /// specified for the Cancun fork. /// /// See also /// @@ -326,13 +312,14 @@ pub trait OpRbuilderEngineApi { payload_attributes: Option, ) -> RpcResult; - /// Retrieves an execution payload from a previously started build process, as specified for the - /// Shanghai fork. + /// Retrieves an execution payload from a previously started build process, + /// as specified for the Shanghai fork. /// /// See also /// /// Note: - /// > Provider software MAY stop the corresponding build process after serving this call. + /// > Provider software MAY stop the corresponding build process after + /// > serving this call. /// /// No modifications needed for OP compatibility. #[method(name = "getPayloadV2")] @@ -341,13 +328,14 @@ pub trait OpRbuilderEngineApi { payload_id: PayloadId, ) -> RpcResult; - /// Retrieves an execution payload from a previously started build process, as specified for the - /// Cancun fork. + /// Retrieves an execution payload from a previously started build process, + /// as specified for the Cancun fork. /// /// See also /// /// Note: - /// > Provider software MAY stop the corresponding build process after serving this call. + /// > Provider software MAY stop the corresponding build process after + /// > serving this call. /// /// OP modifications: /// - the response type is extended to [`EngineTypes::ExecutionPayloadEnvelopeV3`]. @@ -357,13 +345,15 @@ pub trait OpRbuilderEngineApi { payload_id: PayloadId, ) -> RpcResult; - /// Returns the most recent version of the payload that is available in the corresponding - /// payload build process at the time of receiving this call. + /// Returns the most recent version of the payload that is available in the + /// corresponding payload build process at the time of receiving this + /// call. /// /// See also /// /// Note: - /// > Provider software MAY stop the corresponding build process after serving this call. 
+ /// > Provider software MAY stop the corresponding build process after + /// > serving this call. /// /// OP modifications: /// - the response type is extended to [`EngineTypes::ExecutionPayloadEnvelopeV4`]. @@ -382,16 +372,16 @@ pub trait OpRbuilderEngineApi { block_hashes: Vec, ) -> RpcResult; - /// Returns the execution payload bodies by the range starting at `start`, containing `count` - /// blocks. + /// Returns the execution payload bodies by the range starting at `start`, + /// containing `count` blocks. /// - /// WARNING: This method is associated with the BeaconBlocksByRange message in the consensus - /// layer p2p specification, meaning the input should be treated as untrusted or potentially - /// adversarial. + /// WARNING: This method is associated with the BeaconBlocksByRange message + /// in the consensus layer p2p specification, meaning the input should + /// be treated as untrusted or potentially adversarial. /// - /// Implementers should take care when acting on the input to this method, specifically - /// ensuring that the range is limited properly, and that the range boundaries are computed - /// correctly and without panics. + /// Implementers should take care when acting on the input to this method, + /// specifically ensuring that the range is limited properly, and that + /// the range boundaries are computed correctly and without panics. /// /// See also #[method(name = "getPayloadBodiesByRangeV1")] @@ -402,8 +392,8 @@ pub trait OpRbuilderEngineApi { ) -> RpcResult; /// Signals superchain information to the Engine. - /// Returns the latest supported OP-Stack protocol version of the execution engine. - /// See also + /// Returns the latest supported OP-Stack protocol version of the execution + /// engine. 
See also #[method(name = "engine_signalSuperchainV1")] async fn signal_superchain_v1(&self, _signal: SuperchainSignal) -> RpcResult; @@ -419,7 +409,8 @@ pub trait OpRbuilderEngineApi { client_version: ClientVersionV1, ) -> RpcResult>; - /// Returns the list of Engine API methods supported by the execution layer client software. + /// Returns the list of Engine API methods supported by the execution layer + /// client software. /// /// See also #[method(name = "exchangeCapabilities")] diff --git a/crates/op-rbuilder/src/revert_protection.rs b/crates/op-rbuilder/src/revert_protection.rs index f3fa8924e..7090e8591 100644 --- a/crates/op-rbuilder/src/revert_protection.rs +++ b/crates/op-rbuilder/src/revert_protection.rs @@ -83,9 +83,7 @@ where self.metrics.failed_bundles.increment(1); } - self.metrics - .bundle_receive_duration - .record(request_start_time.elapsed()); + self.metrics.bundle_receive_duration.record(request_start_time.elapsed()); bundle_result } @@ -98,10 +96,8 @@ where Ok(Some(receipt)) } else if self.reverted_cache.get(&hash).await.is_some() { // Found the transaction in the reverted cache - Err( - EthApiError::InvalidParams("the transaction was dropped from the pool".into()) - .into(), - ) + Err(EthApiError::InvalidParams("the transaction was dropped from the pool".into()) + .into()) } else { Ok(None) } @@ -115,10 +111,8 @@ where Eth: FullEthApi + Send + Sync + Clone + 'static, { async fn send_bundle_inner(&self, bundle: Bundle) -> RpcResult { - let last_block_number = self - .provider - .best_block_number() - .map_err(|_e| EthApiError::InternalEthError)?; + let last_block_number = + self.provider.best_block_number().map_err(|_e| EthApiError::InternalEthError)?; // Only one transaction in the bundle is expected let bundle_transaction = match bundle.transactions.len() { @@ -137,9 +131,7 @@ where } }; - let conditional = bundle - .conditional(last_block_number) - .map_err(EthApiError::from)?; + let conditional = 
bundle.conditional(last_block_number).map_err(EthApiError::from)?; let recovered = recover_raw_transaction(&bundle_transaction)?; let pool_transaction = @@ -155,9 +147,7 @@ where .await .map_err(EthApiError::from)?; - let result = BundleResult { - bundle_hash: outcome.hash, - }; + let result = BundleResult { bundle_hash: outcome.hash }; Ok(result) } } diff --git a/crates/op-rbuilder/src/tests/data_availability.rs b/crates/op-rbuilder/src/tests/data_availability.rs index ffee176fe..b25bccddf 100644 --- a/crates/op-rbuilder/src/tests/data_availability.rs +++ b/crates/op-rbuilder/src/tests/data_availability.rs @@ -3,36 +3,32 @@ use alloy_provider::Provider; use macros::{if_flashblocks, if_standard, rb_test}; /// This test ensures that the transaction size limit is respected. -/// We will set limit to 1 byte and see that the builder will not include any transactions. +/// We will set limit to 1 byte and see that the builder will not include any +/// transactions. #[rb_test] async fn tx_size_limit(rbuilder: LocalInstance) -> eyre::Result<()> { let driver = rbuilder.driver().await?; - // Set (max_tx_da_size, max_block_da_size), with this case block won't fit any transaction + // Set (max_tx_da_size, max_block_da_size), with this case block won't fit any + // transaction let call = driver .provider() .raw_request::<(i32, i32), bool>("miner_setMaxDASize".into(), (1, 0)) .await?; assert!(call, "miner_setMaxDASize should be executed successfully"); - let unfit_tx = driver - .create_transaction() - .with_max_priority_fee_per_gas(50) - .send() - .await?; + let unfit_tx = driver.create_transaction().with_max_priority_fee_per_gas(50).send().await?; let block = driver.build_new_block().await?; // tx should not be included because we set the tx_size_limit to 1 - assert!( - !block.includes(unfit_tx.tx_hash()), - "transaction should not be included in the block" - ); + assert!(!block.includes(unfit_tx.tx_hash()), "transaction should not be included in the block"); Ok(()) } /// This 
test ensures that the block size limit is respected. -/// We will set limit to 1 byte and see that the builder will not include any transactions. +/// We will set limit to 1 byte and see that the builder will not include any +/// transactions. #[rb_test] async fn block_size_limit(rbuilder: LocalInstance) -> eyre::Result<()> { let driver = rbuilder.driver().await?; @@ -47,18 +43,16 @@ async fn block_size_limit(rbuilder: LocalInstance) -> eyre::Result<()> { let (unfit_tx, block) = driver.build_new_block_with_valid_transaction().await?; // tx should not be included because we set the tx_size_limit to 1 - assert!( - !block.includes(&unfit_tx), - "transaction should not be included in the block" - ); + assert!(!block.includes(&unfit_tx), "transaction should not be included in the block"); Ok(()) } /// This test ensures that block will fill up to the limit. /// Size of each transaction is 100000000 -/// We will set limit to 3 txs and see that the builder will include 3 transactions. -/// We should not forget about builder transaction so we will spawn only 2 regular txs. +/// We will set limit to 3 txs and see that the builder will include 3 +/// transactions. We should not forget about builder transaction so we will +/// spawn only 2 regular txs. 
#[rb_test] async fn block_fill(rbuilder: LocalInstance) -> eyre::Result<()> { let driver = rbuilder.driver().await?; @@ -70,18 +64,10 @@ async fn block_fill(rbuilder: LocalInstance) -> eyre::Result<()> { .await?; assert!(call, "miner_setMaxDASize should be executed successfully"); - // We already have 2 so we will spawn one more to check that it won't be included (it won't fit - // because of builder tx) - let fit_tx_1 = driver - .create_transaction() - .with_max_priority_fee_per_gas(50) - .send() - .await?; - let fit_tx_2 = driver - .create_transaction() - .with_max_priority_fee_per_gas(50) - .send() - .await?; + // We already have 2 so we will spawn one more to check that it won't be + // included (it won't fit because of builder tx) + let fit_tx_1 = driver.create_transaction().with_max_priority_fee_per_gas(50).send().await?; + let fit_tx_2 = driver.create_transaction().with_max_priority_fee_per_gas(50).send().await?; let unfit_tx_3 = driver.create_transaction().send().await?; let block = driver.build_new_block_with_current_timestamp(None).await?; @@ -100,10 +86,7 @@ async fn block_fill(rbuilder: LocalInstance) -> eyre::Result<()> { assert!(!block.includes(fit_tx_2.tx_hash()), "tx should not be in block"); } - assert!( - !block.includes(unfit_tx_3.tx_hash()), - "unfit tx should not be in block" - ); + assert!(!block.includes(unfit_tx_3.tx_hash()), "unfit tx should not be in block"); assert!( driver.latest_full().await?.transactions.len() == 4, "builder + deposit + 2 valid txs should be in the block" diff --git a/crates/op-rbuilder/src/tests/flashblocks.rs b/crates/op-rbuilder/src/tests/flashblocks.rs index d9789457d..ecfb108f3 100644 --- a/crates/op-rbuilder/src/tests/flashblocks.rs +++ b/crates/op-rbuilder/src/tests/flashblocks.rs @@ -28,11 +28,7 @@ async fn smoke_dynamic_base(rbuilder: LocalInstance) -> eyre::Result<()> { for _ in 0..10 { for _ in 0..5 { // send a valid transaction - let _ = driver - .create_transaction() - .random_valid_transfer() - .send() - 
.await?; + let _ = driver.create_transaction().random_valid_transfer().send().await?; } let block = driver.build_new_block_with_current_timestamp(None).await?; assert_eq!(block.transactions.len(), 8, "Got: {:?}", block.transactions); // 5 normal txn + deposit + 2 builder txn @@ -66,11 +62,7 @@ async fn smoke_dynamic_unichain(rbuilder: LocalInstance) -> eyre::Result<()> { for _ in 0..10 { for _ in 0..5 { // send a valid transaction - let _ = driver - .create_transaction() - .random_valid_transfer() - .send() - .await?; + let _ = driver.create_transaction().random_valid_transfer().send().await?; } let block = driver.build_new_block_with_current_timestamp(None).await?; assert_eq!(block.transactions.len(), 8, "Got: {:?}", block.transactions); // 5 normal txn + deposit + 2 builder txn @@ -104,11 +96,7 @@ async fn smoke_classic_unichain(rbuilder: LocalInstance) -> eyre::Result<()> { for _ in 0..10 { for _ in 0..5 { // send a valid transaction - let _ = driver - .create_transaction() - .random_valid_transfer() - .send() - .await?; + let _ = driver.create_transaction().random_valid_transfer().send().await?; } let block = driver.build_new_block().await?; assert_eq!(block.transactions.len(), 8, "Got: {:?}", block.transactions); // 5 normal txn + deposit + 2 builder txn @@ -142,11 +130,7 @@ async fn smoke_classic_base(rbuilder: LocalInstance) -> eyre::Result<()> { for _ in 0..10 { for _ in 0..5 { // send a valid transaction - let _ = driver - .create_transaction() - .random_valid_transfer() - .send() - .await?; + let _ = driver.create_transaction().random_valid_transfer().send().await?; } let block = driver.build_new_block().await?; assert_eq!(block.transactions.len(), 8, "Got: {:?}", block.transactions); // 5 normal txn + deposit + 2 builder txn @@ -180,21 +164,12 @@ async fn unichain_dynamic_with_lag(rbuilder: LocalInstance) -> eyre::Result<()> for i in 0..9 { for _ in 0..5 { // send a valid transaction - let _ = driver - .create_transaction() - .random_valid_transfer() - 
.send() - .await?; + let _ = driver.create_transaction().random_valid_transfer().send().await?; } let block = driver .build_new_block_with_current_timestamp(Some(Duration::from_millis(i * 100))) .await?; - assert_eq!( - block.transactions.len(), - 8, - "Got: {:#?}", - block.transactions - ); // 5 normal txn + deposit + 2 builder txn + assert_eq!(block.transactions.len(), 8, "Got: {:#?}", block.transactions); // 5 normal txn + deposit + 2 builder txn tokio::time::sleep(std::time::Duration::from_secs(1)).await; } @@ -223,15 +198,10 @@ async fn dynamic_with_full_block_lag(rbuilder: LocalInstance) -> eyre::Result<() for _ in 0..5 { // send a valid transaction - let _ = driver - .create_transaction() - .random_valid_transfer() - .send() - .await?; + let _ = driver.create_transaction().random_valid_transfer().send().await?; } - let block = driver - .build_new_block_with_current_timestamp(Some(Duration::from_millis(999))) - .await?; + let block = + driver.build_new_block_with_current_timestamp(Some(Duration::from_millis(999))).await?; // We could only produce block with deposits + builder tx because of short time frame assert_eq!(block.transactions.len(), 2); @@ -283,17 +253,10 @@ async fn test_flashblock_min_filtering(rbuilder: LocalInstance) -> eyre::Result< // Check that tx1 comes before tx2 let tx1_hash = *tx1.tx_hash(); let tx2_hash = *tx2.tx_hash(); - let tx1_pos = flashblocks_listener - .find_transaction_flashblock(&tx1_hash) - .unwrap(); - let tx2_pos = flashblocks_listener - .find_transaction_flashblock(&tx2_hash) - .unwrap(); - - assert!( - tx1_pos < tx2_pos, - "tx {tx1_hash:?} does not come before {tx2_hash:?}" - ); + let tx1_pos = flashblocks_listener.find_transaction_flashblock(&tx1_hash).unwrap(); + let tx2_pos = flashblocks_listener.find_transaction_flashblock(&tx2_hash).unwrap(); + + assert!(tx1_pos < tx2_pos, "tx {tx1_hash:?} does not come before {tx2_hash:?}"); let flashblocks = flashblocks_listener.get_flashblocks(); assert_eq!(6, flashblocks.len()); @@ 
-330,11 +293,7 @@ async fn test_flashblock_max_filtering(rbuilder: LocalInstance) -> eyre::Result< .await?; assert!(call, "miner_setMaxDASize should be executed successfully"); - let _fit_tx_1 = driver - .create_transaction() - .with_max_priority_fee_per_gas(50) - .send() - .await?; + let _fit_tx_1 = driver.create_transaction().with_max_priority_fee_per_gas(50).send().await?; let tx1 = driver .create_transaction() @@ -345,11 +304,7 @@ async fn test_flashblock_max_filtering(rbuilder: LocalInstance) -> eyre::Result< let block = driver.build_new_block_with_current_timestamp(None).await?; assert!(!block.includes(tx1.tx_hash())); - assert!( - flashblocks_listener - .find_transaction_flashblock(tx1.tx_hash()) - .is_none() - ); + assert!(flashblocks_listener.find_transaction_flashblock(tx1.tx_hash()).is_none()); let flashblocks = flashblocks_listener.get_flashblocks(); assert_eq!(6, flashblocks.len()); @@ -379,9 +334,7 @@ async fn test_flashblock_min_max_filtering(rbuilder: LocalInstance) -> eyre::Res .create_transaction() .random_valid_transfer() .with_bundle( - BundleOpts::default() - .with_flashblock_number_max(2) - .with_flashblock_number_min(2), + BundleOpts::default().with_flashblock_number_max(2).with_flashblock_number_min(2), ) .send() .await?; @@ -391,9 +344,7 @@ async fn test_flashblock_min_max_filtering(rbuilder: LocalInstance) -> eyre::Res // It ends up in the 2nd flashblock assert_eq!( 2, - flashblocks_listener - .find_transaction_flashblock(tx1.tx_hash()) - .unwrap(), + flashblocks_listener.find_transaction_flashblock(tx1.tx_hash()).unwrap(), "Transaction should be in the 2nd flashblock" ); @@ -422,20 +373,13 @@ async fn test_flashblocks_no_state_root_calculation(rbuilder: LocalInstance) -> let driver = rbuilder.driver().await?; // Send a transaction to ensure block has some activity - let _tx = driver - .create_transaction() - .random_valid_transfer() - .send() - .await?; + let _tx = driver.create_transaction().random_valid_transfer().send().await?; // 
Build a block with current timestamp (not historical) and calculate_state_root: false let block = driver.build_new_block_with_current_timestamp(None).await?; // Verify that flashblocks are still produced (block should have transactions) - assert!( - block.transactions.len() > 2, - "Block should contain transactions" - ); // deposit + builder tx + user tx + assert!(block.transactions.len() > 2, "Block should contain transactions"); // deposit + builder tx + user tx // Verify that state root is not calculated (should be zero) assert_eq!( diff --git a/crates/op-rbuilder/src/tests/framework/apis.rs b/crates/op-rbuilder/src/tests/framework/apis.rs index 6ad0b98a0..c666ff9b2 100644 --- a/crates/op-rbuilder/src/tests/framework/apis.rs +++ b/crates/op-rbuilder/src/tests/framework/apis.rs @@ -92,9 +92,7 @@ impl EngineApi { pub fn with_localhost_port(port: u16) -> EngineApi { EngineApi:: { address: Address::Http( - format!("http://localhost:{port}") - .parse() - .expect("Invalid URL"), + format!("http://localhost:{port}").parse().expect("Invalid URL"), ), jwt_secret: DEFAULT_JWT_TOKEN.parse().expect("Invalid JWT"), _tag: PhantomData, @@ -146,15 +144,9 @@ impl EngineApi

{ &self, payload_id: PayloadId, ) -> eyre::Result<::ExecutionPayloadEnvelopeV4> { - debug!( - "Fetching payload with id: {} at {}", - payload_id, - chrono::Utc::now() - ); - Ok( - OpEngineApiClient::::get_payload_v4(&self.client().await, payload_id) - .await?, - ) + debug!("Fetching payload with id: {} at {}", payload_id, chrono::Utc::now()); + Ok(OpEngineApiClient::::get_payload_v4(&self.client().await, payload_id) + .await?) } pub async fn new_payload( diff --git a/crates/op-rbuilder/src/tests/framework/driver.rs b/crates/op-rbuilder/src/tests/framework/driver.rs index 5f273c1c1..047463fc3 100644 --- a/crates/op-rbuilder/src/tests/framework/driver.rs +++ b/crates/op-rbuilder/src/tests/framework/driver.rs @@ -20,9 +20,10 @@ use crate::{ const DEFAULT_GAS_LIMIT: u64 = 10_000_000; -/// The ChainDriver is a type that allows driving the op builder node to build new blocks manually -/// by calling the `build_new_block` method. It uses the Engine API to interact with the node -/// and the provider to fetch blocks and transactions. +/// The ChainDriver is a type that allows driving the op builder node to build +/// new blocks manually by calling the `build_new_block` method. It uses the +/// Engine API to interact with the node and the provider to fetch blocks and +/// transactions. pub struct ChainDriver { engine_api: EngineApi, provider: RootProvider, @@ -36,8 +37,8 @@ pub struct ChainDriver { impl ChainDriver { const MIN_BLOCK_TIME: Duration = Duration::from_secs(1); - /// Creates a new ChainDriver instance for a local instance of RBuilder running in-process - /// communicating over IPC. + /// Creates a new ChainDriver instance for a local instance of RBuilder + /// running in-process communicating over IPC. pub async fn local(instance: &LocalInstance) -> eyre::Result> { Ok(ChainDriver:: { engine_api: instance.engine_api(), @@ -64,27 +65,28 @@ impl ChainDriver { } } - /// Specifies the block builder signing key used to sign builder transactions. 
- /// If not specified, a random signer will be used. + /// Specifies the block builder signing key used to sign builder + /// transactions. If not specified, a random signer will be used. pub fn with_signer(mut self, signer: Signer) -> Self { self.signer = Some(signer); self } - /// Specifies a custom gas limit for blocks being built, otherwise the limit is - /// set to a default value of 10_000_000. + /// Specifies a custom gas limit for blocks being built, otherwise the limit + /// is set to a default value of 10_000_000. pub fn with_gas_limit(mut self, gas_limit: u64) -> Self { self.gas_limit = Some(gas_limit); self } - /// Adds an external Optimism execution client node that will receive all newly built - /// blocks by this driver and ensure that they are valid. This validation process is - /// transparent and happens in the background when building new blocks. + /// Adds an external Optimism execution client node that will receive all + /// newly built blocks by this driver and ensure that they are valid. + /// This validation process is transparent and happens in the background + /// when building new blocks. /// - /// If there are validation nodes specified any newly built block will be submitted to - /// the validation EL and the driver will fail if the block is rejected by the - /// validation node. + /// If there are validation nodes specified any newly built block will be + /// submitted to the validation EL and the driver will fail if the block + /// is rejected by the validation node. 
pub async fn with_validation_node(mut self, node: ExternalNode) -> eyre::Result { node.catch_up_with(self.provider()).await?; self.validation_nodes.push(node); @@ -95,22 +97,22 @@ impl ChainDriver { // public test api impl ChainDriver { pub async fn build_new_block_with_no_tx_pool(&self) -> eyre::Result> { - self.build_new_block_with_txs_timestamp(vec![], Some(true), None, None) - .await + self.build_new_block_with_txs_timestamp(vec![], Some(true), None, None).await } - /// Builds a new block using the current state of the chain and the transactions in the pool. + /// Builds a new block using the current state of the chain and the + /// transactions in the pool. pub async fn build_new_block(&self) -> eyre::Result> { self.build_new_block_with_txs(vec![]).await } - /// Builds a new block with block_timestamp calculated as block time right before sending FCU + /// Builds a new block with block_timestamp calculated as block time right + /// before sending FCU pub async fn build_new_block_with_current_timestamp( &self, timestamp_jitter: Option, ) -> eyre::Result> { - self.build_new_block_with_txs_timestamp(vec![], None, None, timestamp_jitter) - .await + self.build_new_block_with_txs_timestamp(vec![], None, None, timestamp_jitter).await } /// Builds a new block with provided txs and timestamp @@ -127,10 +129,11 @@ impl ChainDriver { // Add L1 block info as the first transaction in every L2 block // This deposit transaction contains L1 block metadata required by the L2 chain // Currently using hardcoded data from L1 block 124665056 - // If this info is not provided, Reth cannot decode the receipt for any transaction - // in the block since it also includes this info as part of the result. - // It does not matter if the to address (4200000000000000000000000000000000000015) is - // not deployed on the L2 chain since Reth queries the block to get the info and not the contract. 
+ // If this info is not provided, Reth cannot decode the receipt for any + // transaction in the block since it also includes this info as part of + // the result. It does not matter if the to address + // (4200000000000000000000000000000000000015) is not deployed on the L2 + // chain since Reth queries the block to get the info and not the contract. let block_info_tx: Bytes = { let deposit_tx = TxDeposit { source_hash: B256::default(), @@ -150,20 +153,23 @@ impl ChainDriver { }; let mut wait_until = None; - // If block_timestamp we need to produce new timestamp according to current clocks + // If block_timestamp we need to produce new timestamp according to current + // clocks let block_timestamp = if let Some(block_timestamp) = block_timestamp { block_timestamp.as_secs() } else { - // We take the following second, until which we will need to wait before issuing FCU + // We take the following second, until which we will need to wait before issuing + // FCU let latest_timestamp = (chrono::Utc::now().timestamp() + 1) as u64; wait_until = Some(latest_timestamp); - latest_timestamp - + Duration::from_millis(self.args.chain_block_time) + latest_timestamp + + Duration::from_millis(self.args.chain_block_time) .as_secs() .max(Self::MIN_BLOCK_TIME.as_secs()) }; - // This step will alight time at which we send FCU. ideally we must send FCU and the beginning of the second. + // This step will alight time at which we send FCU. ideally we must send FCU and + // the beginning of the second. if let Some(wait_until) = wait_until { let sleep_time = Duration::from_secs(wait_until).saturating_sub(Duration::from_millis( chrono::Utc::now().timestamp_millis() as u64, @@ -223,16 +229,14 @@ impl ChainDriver { .engine_api .new_payload(payload.clone(), vec![], B256::ZERO, Requests::default()) .await? 
- .status - != PayloadStatusEnum::Valid + .status != + PayloadStatusEnum::Valid { return Err(eyre::eyre!("Invalid validation status from builder")); } let new_block_hash = payload.payload_inner.payload_inner.payload_inner.block_hash; - self.engine_api - .update_forkchoice(latest.header.hash, new_block_hash, None) - .await?; + self.engine_api.update_forkchoice(latest.header.hash, new_block_hash, None).await?; let block = self .provider @@ -256,8 +260,9 @@ impl ChainDriver { Ok(block) } - /// Builds a new block using the current state of the chain and the transactions in the pool with a list - /// of mandatory builder transactions. Those are usually deposit transactions. + /// Builds a new block using the current state of the chain and the + /// transactions in the pool with a list of mandatory builder + /// transactions. Those are usually deposit transactions. pub async fn build_new_block_with_txs( &self, txs: Vec, @@ -266,8 +271,7 @@ impl ChainDriver { let latest_timestamp = Duration::from_secs(latest.header.timestamp); let block_timestamp = latest_timestamp + Self::MIN_BLOCK_TIME; - self.build_new_block_with_txs_timestamp(txs, None, Some(block_timestamp), None) - .await + self.build_new_block_with_txs_timestamp(txs, None, Some(block_timestamp), None).await } /// Retreives the latest built block and returns only a list of transaction @@ -289,8 +293,8 @@ impl ChainDriver { .ok_or_else(|| eyre::eyre!("Failed to get latest full block")) } - /// retreives a specific block by its number or tag and returns a list of transaction - /// hashes from its body. + /// retreives a specific block by its number or tag and returns a list of + /// transaction hashes from its body. pub async fn get_block( &self, number: BlockNumberOrTag, @@ -298,8 +302,8 @@ impl ChainDriver { Ok(self.provider.get_block_by_number(number).await?) } - /// retreives a specific block by its number or tag and returns a list of full transaction - /// contents in its body. 
+ /// retreives a specific block by its number or tag and returns a list of + /// full transaction contents in its body. pub async fn get_block_full( &self, number: BlockNumberOrTag, @@ -307,7 +311,8 @@ impl ChainDriver { Ok(self.provider.get_block_by_number(number).full().await?) } - /// Returns a transaction builder that can be used to create and send transactions. + /// Returns a transaction builder that can be used to create and send + /// transactions. pub fn create_transaction(&self) -> TransactionBuilder { TransactionBuilder::new(self.provider.clone()) } @@ -323,16 +328,14 @@ impl ChainDriver { impl ChainDriver { async fn fcu(&self, attribs: OpPayloadAttributes) -> eyre::Result { let latest = self.latest().await?.header.hash; - let response = self - .engine_api - .update_forkchoice(latest, latest, Some(attribs)) - .await?; + let response = self.engine_api.update_forkchoice(latest, latest, Some(attribs)).await?; Ok(response) } } -// L1 block info for OP mainnet block 124665056 (stored in input of tx at index 0) +// L1 block info for OP mainnet block 124665056 (stored in input of tx at index +// 0) // // https://optimistic.etherscan.io/tx/0x312e290cf36df704a2217b015d6455396830b0ce678b860ebfcc30f41403d7b1 const FJORD_DATA: &[u8] = &hex!( diff --git a/crates/op-rbuilder/src/tests/framework/external.rs b/crates/op-rbuilder/src/tests/framework/external.rs index 8bb99edad..95eb5e680 100644 --- a/crates/op-rbuilder/src/tests/framework/external.rs +++ b/crates/op-rbuilder/src/tests/framework/external.rs @@ -27,15 +27,16 @@ use crate::tests::{EngineApi, Ipc}; const AUTH_CONTAINER_IPC_PATH: &str = "/home/op-reth-shared/auth.ipc"; const RPC_CONTAINER_IPC_PATH: &str = "/home/op-reth-shared/rpc.ipc"; -/// This type represents an Optimism execution client node that is running inside a -/// docker container. This node is used to validate the correctness of the blocks built -/// by op-rbuilder. 
+/// This type represents an Optimism execution client node that is running +/// inside a docker container. This node is used to validate the correctness of +/// the blocks built by op-rbuilder. /// -/// When this node is attached to a `ChainDriver`, it will automatically catch up with the -/// provided chain and will transparently ingest all newly built blocks by the driver. +/// When this node is attached to a `ChainDriver`, it will automatically catch +/// up with the provided chain and will transparently ingest all newly built +/// blocks by the driver. /// -/// If the built payload fails to validate, then the driver block production function will -/// return an error during `ChainDriver::build_new_block`. +/// If the built payload fails to validate, then the driver block production +/// function will return an error during `ChainDriver::build_new_block`. pub struct ExternalNode { engine_api: EngineApi, provider: RootProvider, @@ -45,8 +46,8 @@ pub struct ExternalNode { } impl ExternalNode { - /// Creates a new instance of `ExternalNode` that runs the `op-reth` client in a Docker container - /// using the specified version tag. + /// Creates a new instance of `ExternalNode` that runs the `op-reth` client + /// in a Docker container using the specified version tag. 
pub async fn reth_version(version_tag: &str) -> eyre::Result { let docker = Docker::connect_with_local_defaults()?; @@ -61,18 +62,13 @@ impl ExternalNode { std::fs::create_dir_all(&tempdir) .map_err(|_| eyre::eyre!("Failed to create temporary directory"))?; - std::fs::write( - tempdir.join("genesis.json"), - include_str!("./artifacts/genesis.json.tmpl"), - ) - .map_err(|_| eyre::eyre!("Failed to write genesis file"))?; + std::fs::write(tempdir.join("genesis.json"), include_str!("./artifacts/genesis.json.tmpl")) + .map_err(|_| eyre::eyre!("Failed to write genesis file"))?; // Create Docker container with reth EL client let container = create_container(&tempdir, &docker, version_tag).await?; - docker - .start_container(&container.id, None::>) - .await?; + docker.start_container(&container.id, None::>).await?; // Wait for the container to be ready and IPCs to be created await_ipc_readiness(&docker, &container.id).await?; @@ -101,17 +97,11 @@ impl ExternalNode { } }); - Ok(Self { - engine_api, - provider, - docker, - tempdir, - container_id: container.id, - }) + Ok(Self { engine_api, provider, docker, tempdir, container_id: container.id }) } - /// Creates a new instance of `ExternalNode` that runs the `op-reth` client in a Docker container - /// using the latest version. + /// Creates a new instance of `ExternalNode` that runs the `op-reth` client + /// in a Docker container using the latest version. pub async fn reth() -> eyre::Result { Self::reth_version("latest").await } @@ -132,8 +122,8 @@ impl ExternalNode { impl ExternalNode { /// Catches up this node with another node. /// - /// This method will fail if this node is ahead of the provided chain or they do not - /// share the same genesis block. + /// This method will fail if this node is ahead of the provided chain or + /// they do not share the same genesis block. 
pub async fn catch_up_with(&self, chain: &RootProvider) -> eyre::Result<()> { // check if we need to catch up let (latest_hash, latest_number) = chain.latest_block_hash_and_number().await?; @@ -180,9 +170,7 @@ impl ExternalNode { let mut our_current_height = our_latest_number + 1; while our_current_height <= latest_number { - let payload = chain - .execution_payload_for_block(our_current_height) - .await?; + let payload = chain.execution_payload_for_block(our_current_height).await?; let (latest_hash, _) = self.provider().latest_block_hash_and_number().await?; @@ -199,9 +187,7 @@ impl ExternalNode { } let new_chain_hash = status.latest_valid_hash.unwrap_or_default(); - self.engine_api() - .update_forkchoice(latest_hash, new_chain_hash, None) - .await?; + self.engine_api().update_forkchoice(latest_hash, new_chain_hash, None).await?; our_current_height += 1; } @@ -222,8 +208,8 @@ impl ExternalNode { Ok(()) } - /// Posts a block to the external validation node for validation and sets it as the latest block - /// in the fork choice. + /// Posts a block to the external validation node for validation and sets it + /// as the latest block in the fork choice. pub async fn post_block(&self, payload: &OpExecutionPayloadV4) -> eyre::Result<()> { let result = self .engine_api @@ -244,9 +230,7 @@ impl ExternalNode { let (latest_hash, _) = self.provider.latest_block_hash_and_number().await?; - self.engine_api - .update_forkchoice(latest_hash, new_block_hash, None) - .await?; + self.engine_api.update_forkchoice(latest_hash, new_block_hash, None).await?; Ok(()) } @@ -270,10 +254,7 @@ async fn create_container( version_tag: &str, ) -> eyre::Result { let host_config = HostConfig { - binds: Some(vec![format!( - "{}:/home/op-reth-shared:rw", - tempdir.display() - )]), + binds: Some(vec![format!("{}:/home/op-reth-shared:rw", tempdir.display())]), ..Default::default() }; @@ -289,10 +270,7 @@ async fn create_container( ); while let Some(pull_result) = pull_stream.try_next().await? 
{ - debug!( - "Pulling 'ghcr.io/paradigmxyz/op-reth:{version_tag}' locally: {:?}", - pull_result - ); + debug!("Pulling 'ghcr.io/paradigmxyz/op-reth:{version_tag}' locally: {:?}", pull_result); } // Don't expose any ports, as we will only use IPC for communication. @@ -321,10 +299,7 @@ async fn create_container( }; Ok(docker - .create_container( - Some(CreateContainerOptions::::default()), - container_config, - ) + .create_container(Some(CreateContainerOptions::::default()), container_config) .await?) } @@ -406,41 +381,30 @@ async fn await_ipc_readiness(docker: &Docker, container: &str) -> eyre::Result<( } if !auth_ipc_started || !rpc_ipc_started { - return Err(eyre::eyre!( - "Failed to start op-reth container: IPCs not ready" - )); + return Err(eyre::eyre!("Failed to start op-reth container: IPCs not ready")); } Ok(()) } async fn cleanup(tempdir: PathBuf, docker: Docker, container_id: String) { - // This is a no-op function that will be spawned to clean up the container on ctrl-c - // or Drop. - debug!( - "Cleaning up external node resources at {} [{container_id}]...", - tempdir.display() - ); + // This is a no-op function that will be spawned to clean up the container on + // ctrl-c or Drop. + debug!("Cleaning up external node resources at {} [{container_id}]...", tempdir.display()); if !tempdir.exists() { return; // If the tempdir does not exist, there's nothing to clean up. 
} // Block on cleaning up the container - if let Err(e) = docker - .stop_container(&container_id, None::) - .await - { + if let Err(e) = docker.stop_container(&container_id, None::).await { warn!("Failed to stop container {}: {}", container_id, e); } if let Err(e) = docker .remove_container( &container_id, - Some(RemoveContainerOptions { - force: true, - ..Default::default() - }), + Some(RemoveContainerOptions { force: true, ..Default::default() }), ) .await { diff --git a/crates/op-rbuilder/src/tests/framework/instance.rs b/crates/op-rbuilder/src/tests/framework/instance.rs index 7a9ba14b4..c2d84b573 100644 --- a/crates/op-rbuilder/src/tests/framework/instance.rs +++ b/crates/op-rbuilder/src/tests/framework/instance.rs @@ -47,8 +47,9 @@ use tokio_tungstenite::{connect_async, tungstenite::Message}; use tokio_util::sync::CancellationToken; use tracing::warn; -/// Represents a type that emulates a local in-process instance of the OP builder node. -/// This node uses IPC as the communication channel for the RPC server Engine API. +/// Represents a type that emulates a local in-process instance of the OP +/// builder node. This node uses IPC as the communication channel for the RPC +/// server Engine API. pub struct LocalInstance { signer: Signer, config: NodeConfig, @@ -60,20 +61,20 @@ pub struct LocalInstance { } impl LocalInstance { - /// Creates a new local instance of the OP builder node with the given arguments, - /// with the default Reth node configuration. + /// Creates a new local instance of the OP builder node with the given + /// arguments, with the default Reth node configuration. /// - /// This method does not prefund any accounts, so before sending any transactions - /// make sure that sender accounts are funded. + /// This method does not prefund any accounts, so before sending any + /// transactions make sure that sender accounts are funded. pub async fn new(args: OpRbuilderArgs) -> eyre::Result { Box::pin(Self::new_with_config::

(args, default_node_config())).await } - /// Creates a new local instance of the OP builder node with the given arguments, - /// with a given Reth node configuration. + /// Creates a new local instance of the OP builder node with the given + /// arguments, with a given Reth node configuration. /// - /// This method does not prefund any accounts, so before sending any transactions - /// make sure that sender accounts are funded. + /// This method does not prefund any accounts, so before sending any + /// transactions make sure that sender accounts are funded. pub async fn new_with_config( args: OpRbuilderArgs, config: NodeConfig, @@ -90,9 +91,7 @@ impl LocalInstance { let signer = args.builder_signer.unwrap_or_else(|| { Signer::try_from_secret( - BUILDER_PRIVATE_KEY - .parse() - .expect("Invalid builder private key"), + BUILDER_PRIVATE_KEY.parse().expect("Invalid builder private key"), ) .expect("Failed to create signer from private key") }); @@ -138,8 +137,7 @@ impl LocalInstance { reverted_cache, ); - ctx.modules - .add_or_replace_configured(revert_protection_ext.into_rpc())?; + ctx.modules.add_or_replace_configured(revert_protection_ext.into_rpc())?; } Ok(()) @@ -163,9 +161,7 @@ impl LocalInstance { // Wait for all required components to be ready rpc_ready_rx.await.expect("Failed to receive ready signal"); - let pool_monitor = txpool_ready_rx - .await - .expect("Failed to receive txpool ready signal"); + let pool_monitor = txpool_ready_rx.await.expect("Failed to receive txpool ready signal"); Ok(Self { args, @@ -178,23 +174,21 @@ impl LocalInstance { }) } - /// Creates new local instance of the OP builder node with the standard builder configuration. - /// This method prefunds the default accounts with 1 ETH each. + /// Creates new local instance of the OP builder node with the standard + /// builder configuration. This method prefunds the default accounts + /// with 1 ETH each. 
pub async fn standard() -> eyre::Result { let args = crate::args::Cli::parse_from(["dummy", "node"]); - let Commands::Node(ref node_command) = args.command else { - unreachable!() - }; + let Commands::Node(ref node_command) = args.command else { unreachable!() }; Self::new::(node_command.ext.clone()).await } - /// Creates new local instance of the OP builder node with the flashblocks builder configuration. - /// This method prefunds the default accounts with 1 ETH each. + /// Creates new local instance of the OP builder node with the flashblocks + /// builder configuration. This method prefunds the default accounts + /// with 1 ETH each. pub async fn flashblocks() -> eyre::Result { let mut args = crate::args::Cli::parse_from(["dummy", "node"]); - let Commands::Node(ref mut node_command) = args.command else { - unreachable!() - }; + let Commands::Node(ref mut node_command) = args.command else { unreachable!() }; node_command.ext.flashblocks.enabled = true; node_command.ext.flashblocks.flashblocks_port = 0; // use random os assigned port Self::new::(node_command.ext.clone()).await @@ -220,11 +214,7 @@ impl LocalInstance { .parse() .expect("Failed to parse flashblocks IP address"); - let ipaddr = if ipaddr.is_unspecified() { - Ipv4Addr::LOCALHOST - } else { - ipaddr - }; + let ipaddr = if ipaddr.is_unspecified() { Ipv4Addr::LOCALHOST } else { ipaddr }; let port = self.args.flashblocks.flashblocks_port; @@ -268,10 +258,7 @@ impl Drop for LocalInstance { if let Some(task_manager) = self.task_manager.take() { task_manager.graceful_shutdown_with_timeout(Duration::from_secs(3)); std::fs::remove_dir_all(self.config().datadir().to_string()).unwrap_or_else(|e| { - panic!( - "Failed to remove temporary data directory {}: {e}", - self.config().datadir() - ) + panic!("Failed to remove temporary data directory {}: {e}", self.config().datadir()) }); } } @@ -289,19 +276,13 @@ pub fn default_node_config() -> NodeConfig { let tempdir = std::env::temp_dir(); let random_id = nanoid!(); 
- let data_path = tempdir - .join(format!("rbuilder.{random_id}.datadir")) - .to_path_buf(); + let data_path = tempdir.join(format!("rbuilder.{random_id}.datadir")).to_path_buf(); std::fs::create_dir_all(&data_path).expect("Failed to create temporary data directory"); - let rpc_ipc_path = tempdir - .join(format!("rbuilder.{random_id}.rpc-ipc")) - .to_path_buf(); + let rpc_ipc_path = tempdir.join(format!("rbuilder.{random_id}.rpc-ipc")).to_path_buf(); - let auth_ipc_path = tempdir - .join(format!("rbuilder.{random_id}.auth-ipc")) - .to_path_buf(); + let auth_ipc_path = tempdir.join(format!("rbuilder.{random_id}.auth-ipc")).to_path_buf(); let mut rpc = RpcServerArgs::default().with_auth_ipc(); rpc.ws = false; @@ -314,10 +295,7 @@ pub fn default_node_config() -> NodeConfig { network.discovery.disable_discovery = true; let datadir = DatadirArgs { - datadir: data_path - .to_string_lossy() - .parse() - .expect("Failed to parse data dir path"), + datadir: data_path.to_string_lossy().parse().expect("Failed to parse data dir path"), static_files_path: None, }; @@ -350,16 +328,14 @@ fn pool_component(args: &OpRbuilderArgs) -> OpPoolBuilder { // to garbage collect transactions out of the bundle range. rollup_args.enable_tx_conditional || args.enable_revert_protection, ) - .with_supervisor( - rollup_args.supervisor_http.clone(), - rollup_args.supervisor_safety_level, - ) + .with_supervisor(rollup_args.supervisor_http.clone(), rollup_args.supervisor_safety_level) } /// A utility for listening to flashblocks WebSocket messages during tests. /// -/// This provides a reusable way to capture and inspect flashblocks that are produced -/// during test execution, eliminating the need for duplicate WebSocket listening code. +/// This provides a reusable way to capture and inspect flashblocks that are +/// produced during test execution, eliminating the need for duplicate WebSocket +/// listening code. 
pub struct FlashblocksListener { pub flashblocks: Arc>>, pub cancellation_token: CancellationToken, @@ -367,9 +343,11 @@ pub struct FlashblocksListener { } impl FlashblocksListener { - /// Create a new flashblocks listener that connects to the given WebSocket URL. + /// Create a new flashblocks listener that connects to the given WebSocket + /// URL. /// - /// The listener will automatically parse incoming messages as FlashblocksPayloadV1. + /// The listener will automatically parse incoming messages as + /// FlashblocksPayloadV1. fn new(flashblocks_ws_url: String) -> Self { let flashblocks = Arc::new(Mutex::new(Vec::new())); let cancellation_token = CancellationToken::new(); @@ -395,11 +373,7 @@ impl FlashblocksListener { } }); - Self { - flashblocks, - cancellation_token, - handle, - } + Self { flashblocks, cancellation_token, handle } } /// Get a snapshot of all received flashblocks @@ -409,21 +383,17 @@ impl FlashblocksListener { /// Find a flashblock by index pub fn find_flashblock(&self, index: u64) -> Option { - self.flashblocks - .lock() - .iter() - .find(|fb| fb.index == index) - .cloned() + self.flashblocks.lock().iter().find(|fb| fb.index == index).cloned() } /// Check if any flashblock contains the given transaction hash pub fn contains_transaction(&self, tx_hash: &B256) -> bool { let tx_hash_str = format!("{tx_hash:#x}"); self.flashblocks.lock().iter().any(|fb| { - if let Some(receipts) = fb.metadata.get("receipts") { - if let Some(receipts_obj) = receipts.as_object() { - return receipts_obj.contains_key(&tx_hash_str); - } + if let Some(receipts) = fb.metadata.get("receipts") && + let Some(receipts_obj) = receipts.as_object() + { + return receipts_obj.contains_key(&tx_hash_str); } false }) @@ -433,12 +403,11 @@ impl FlashblocksListener { pub fn find_transaction_flashblock(&self, tx_hash: &B256) -> Option { let tx_hash_str = format!("{tx_hash:#x}"); self.flashblocks.lock().iter().find_map(|fb| { - if let Some(receipts) = fb.metadata.get("receipts") { - 
if let Some(receipts_obj) = receipts.as_object() { - if receipts_obj.contains_key(&tx_hash_str) { - return Some(fb.index); - } - } + if let Some(receipts) = fb.metadata.get("receipts") && + let Some(receipts_obj) = receipts.as_object() && + receipts_obj.contains_key(&tx_hash_str) + { + return Some(fb.index); } None }) diff --git a/crates/op-rbuilder/src/tests/framework/macros/src/lib.rs b/crates/op-rbuilder/src/tests/framework/macros/src/lib.rs index daa88c7e7..7fae8f47a 100644 --- a/crates/op-rbuilder/src/tests/framework/macros/src/lib.rs +++ b/crates/op-rbuilder/src/tests/framework/macros/src/lib.rs @@ -55,7 +55,8 @@ fn get_variant_names() -> Vec<&'static str> { } struct TestConfig { - variants: std::collections::HashMap>, // variant name -> custom expression (None = default) + variants: std::collections::HashMap>, /* variant name -> custom + * expression (None = default) */ args: Option, // Expression to pass to LocalInstance::new() config: Option, // NodeConfig for new_with_config multi_threaded: bool, // Whether to use multi_thread flavor @@ -148,8 +149,8 @@ impl syn::parse::Parse for TestConfig { } // If only args/config/multi_threaded is specified, generate all variants - if config.variants.is_empty() - && (config.args.is_some() || config.config.is_some() || config.multi_threaded) + if config.variants.is_empty() && + (config.args.is_some() || config.config.is_some() || config.multi_threaded) { for variant in BUILDER_VARIANTS { config.variants.insert(variant.name.to_string(), None); @@ -207,9 +208,7 @@ pub fn rb_test(args: TokenStream, input: TokenStream) -> TokenStream { // Create the original function without test attributes (helper function) let mut helper_fn = input_fn.clone(); - helper_fn - .attrs - .retain(|attr| !attr.path().is_ident("test") && !attr.path().is_ident("tokio")); + helper_fn.attrs.retain(|attr| !attr.path().is_ident("test") && !attr.path().is_ident("tokio")); let original_name = &input_fn.sig.ident; let mut generated_functions = 
vec![quote! { #helper_fn }]; @@ -260,12 +259,7 @@ fn validate_signature(item_fn: &ItemFn) { panic!("Function must have exactly one parameter of type LocalInstance."); } - let output_types = item_fn - .sig - .output - .to_token_stream() - .to_string() - .replace(" ", ""); + let output_types = item_fn.sig.output.to_token_stream().to_string().replace(" ", ""); if output_types != "->eyre::Result<()>" { panic!("Function must return Result<(), eyre::Error>. Actual: {output_types}",); diff --git a/crates/op-rbuilder/src/tests/framework/mod.rs b/crates/op-rbuilder/src/tests/framework/mod.rs index d9c7bf1fb..27e6296ca 100644 --- a/crates/op-rbuilder/src/tests/framework/mod.rs +++ b/crates/op-rbuilder/src/tests/framework/mod.rs @@ -23,8 +23,8 @@ pub const DEFAULT_JWT_TOKEN: &str = pub const ONE_ETH: u128 = 1_000_000_000_000_000_000; -/// This gets invoked before any tests, when the cargo test framework loads the test library. -/// It injects itself into +/// This gets invoked before any tests, when the cargo test framework loads the +/// test library. 
It injects itself into #[ctor::ctor] fn init_tests() { use tracing_subscriber::{filter::filter_fn, prelude::*}; @@ -45,10 +45,8 @@ fn init_tests() { tracing_subscriber::registry() .with(tracing_subscriber::fmt::layer()) .with(filter_fn(move |metadata| { - metadata.level() <= &level - && !prefix_blacklist - .iter() - .any(|prefix| metadata.target().starts_with(prefix)) + metadata.level() <= &level && + !prefix_blacklist.iter().any(|prefix| metadata.target().starts_with(prefix)) })) .init(); } diff --git a/crates/op-rbuilder/src/tests/framework/txs.rs b/crates/op-rbuilder/src/tests/framework/txs.rs index 7c91892ad..0402e3434 100644 --- a/crates/op-rbuilder/src/tests/framework/txs.rs +++ b/crates/op-rbuilder/src/tests/framework/txs.rs @@ -84,11 +84,7 @@ impl TransactionBuilder { signer: None, nonce: None, base_fee: None, - tx: TxEip1559 { - chain_id: 901, - gas_limit: 210000, - ..Default::default() - }, + tx: TxEip1559 { chain_id: 901, gas_limit: 210000, ..Default::default() }, bundle_opts: None, with_reverted_hash: false, key: None, @@ -205,9 +201,7 @@ impl TransactionBuilder { self.tx.nonce = nonce; self.tx.max_fee_per_gas = base_fee + self.tx.max_priority_fee_per_gas; - signer - .sign_tx(OpTypedTransaction::Eip1559(self.tx)) - .expect("Failed to sign transaction") + signer.sign_tx(OpTypedTransaction::Eip1559(self.tx)).expect("Failed to sign transaction") } pub async fn send(self) -> eyre::Result> { @@ -222,11 +216,7 @@ impl TransactionBuilder { // Send the transaction as a bundle with the bundle options let bundle = Bundle { transactions: vec![transaction_encoded.into()], - reverting_hashes: if with_reverted_hash { - Some(vec![txn_hash]) - } else { - None - }, + reverting_hashes: if with_reverted_hash { Some(vec![txn_hash]) } else { None }, block_number_min: bundle_opts.block_number_min, block_number_max: bundle_opts.block_number_max, flashblock_number_min: bundle_opts.flashblock_number_min, @@ -235,27 +225,21 @@ impl TransactionBuilder { max_timestamp: 
bundle_opts.max_timestamp, }; - let result: BundleResult = provider - .client() - .request("eth_sendBundle", (bundle,)) - .await?; + let result: BundleResult = + provider.client().request("eth_sendBundle", (bundle,)).await?; - return Ok(PendingTransactionBuilder::new( - provider.root().clone(), - result.bundle_hash, - )); + return Ok(PendingTransactionBuilder::new(provider.root().clone(), result.bundle_hash)); } - Ok(provider - .send_raw_transaction(transaction_encoded.as_slice()) - .await?) + Ok(provider.send_raw_transaction(transaction_encoded.as_slice()).await?) } } type ObservationsMap = DashMap>; pub struct TransactionPoolObserver { - /// Stores a mapping of all observed transactions to their history of events. + /// Stores a mapping of all observed transactions to their history of + /// events. observations: Arc, /// Fired when this type is dropped, giving a signal to the listener loop @@ -328,16 +312,11 @@ impl TransactionPoolObserver { } }); - Self { - observations, - term: Some(term), - } + Self { observations, term: Some(term) } } pub fn tx_status(&self, txhash: TxHash) -> Option { - self.observations - .get(&txhash) - .and_then(|history| history.back().cloned()) + self.observations.get(&txhash).and_then(|history| history.back().cloned()) } pub fn is_pending(&self, txhash: TxHash) -> bool { @@ -353,10 +332,7 @@ impl TransactionPoolObserver { } pub fn count(&self, status: TransactionEvent) -> usize { - self.observations - .iter() - .filter(|tx| tx.value().back() == Some(&status)) - .count() + self.observations.iter().filter(|tx| tx.value().back() == Some(&status)).count() } pub fn pending_count(&self) -> usize { @@ -373,9 +349,7 @@ impl TransactionPoolObserver { /// Returns the history of pool events for a transaction. 
pub fn history(&self, txhash: TxHash) -> Option> { - self.observations - .get(&txhash) - .map(|history| history.iter().cloned().collect()) + self.observations.get(&txhash).map(|history| history.iter().cloned().collect()) } pub fn print_all(&self) { diff --git a/crates/op-rbuilder/src/tests/framework/utils.rs b/crates/op-rbuilder/src/tests/framework/utils.rs index a71361d2e..fec6b266e 100644 --- a/crates/op-rbuilder/src/tests/framework/utils.rs +++ b/crates/op-rbuilder/src/tests/framework/utils.rs @@ -37,7 +37,8 @@ impl TransactionBuilderExt for TransactionBuilder { // This transaction is big in the sense that it uses a lot of gas. The exact // amount it uses is 86220 gas. fn random_big_transaction(self) -> Self { - // PUSH13 0x63ffffffff60005260046000f3 PUSH1 0x00 MSTORE PUSH1 0x02 PUSH1 0x0d PUSH1 0x13 PUSH1 0x00 CREATE2 + // PUSH13 0x63ffffffff60005260046000f3 PUSH1 0x00 MSTORE PUSH1 0x02 PUSH1 0x0d + // PUSH1 0x13 PUSH1 0x00 CREATE2 self.with_create() .with_input(hex!("6c63ffffffff60005260046000f36000526002600d60136000f5").into()) } @@ -53,9 +54,7 @@ pub trait ChainDriverExt { fn fund(&self, address: Address, amount: u128) -> impl Future>; fn first_funded_address(&self) -> Address { - FUNDED_PRIVATE_KEYS[0] - .parse() - .expect("Invalid funded private key") + FUNDED_PRIVATE_KEYS[0].parse().expect("Invalid funded private key") } fn fund_accounts( @@ -65,8 +64,7 @@ pub trait ChainDriverExt { ) -> impl Future>> { async move { let accounts = (0..count).map(|_| Signer::random()).collect::>(); - self.fund_many(accounts.iter().map(|a| a.address).collect(), amount) - .await?; + self.fund_many(accounts.iter().map(|a| a.address).collect(), amount).await?; Ok(accounts) } } @@ -128,32 +126,20 @@ impl ChainDriverExt for ChainDriver

{ let signer = Signer::random(); let signed_tx = signer.sign_tx(OpTypedTransaction::Deposit(deposit))?; let signed_tx_rlp = signed_tx.encoded_2718(); - Ok(self - .build_new_block_with_txs(vec![signed_tx_rlp.into()]) - .await? - .header - .hash) + Ok(self.build_new_block_with_txs(vec![signed_tx_rlp.into()]).await?.header.hash) } async fn build_new_block_with_valid_transaction( &self, ) -> eyre::Result<(TxHash, Block)> { - let tx = self - .create_transaction() - .random_valid_transfer() - .send() - .await?; + let tx = self.create_transaction().random_valid_transfer().send().await?; Ok((*tx.tx_hash(), self.build_new_block().await?)) } async fn build_new_block_with_reverrting_transaction( &self, ) -> eyre::Result<(TxHash, Block)> { - let tx = self - .create_transaction() - .random_reverting_transaction() - .send() - .await?; + let tx = self.create_transaction().random_reverting_transaction().send().await?; Ok((*tx.tx_hash(), self.build_new_block().await?)) } @@ -165,18 +151,14 @@ pub trait BlockTransactionsExt { impl BlockTransactionsExt for Block { fn includes(&self, txs: &impl AsTxs) -> bool { - txs.as_txs() - .into_iter() - .all(|tx| self.transactions.hashes().any(|included| included == tx)) + txs.as_txs().into_iter().all(|tx| self.transactions.hashes().any(|included| included == tx)) } } impl BlockTransactionsExt for BlockTransactionHashes<'_, Transaction> { fn includes(&self, txs: &impl AsTxs) -> bool { let mut included_tx_iter = self.clone(); - txs.as_txs() - .iter() - .all(|tx| included_tx_iter.any(|included| included == *tx)) + txs.as_txs().iter().all(|tx| included_tx_iter.any(|included| included == *tx)) } } @@ -212,10 +194,8 @@ pub fn create_test_db(config: NodeConfig) -> Arc::from( reth_db::test_utils::tempdir_path(), ); - let db_config = config.with_datadir_args(DatadirArgs { - datadir: path.clone(), - ..Default::default() - }); + let db_config = + config.with_datadir_args(DatadirArgs { datadir: path.clone(), ..Default::default() }); let data_dir = 
path.unwrap_or_chain_default(db_config.chain.chain(), db_config.datadir.clone()); let path = data_dir.db(); let db = init_db( diff --git a/crates/op-rbuilder/src/tests/gas_limiter.rs b/crates/op-rbuilder/src/tests/gas_limiter.rs index 00dfb0353..6a49f26e2 100644 --- a/crates/op-rbuilder/src/tests/gas_limiter.rs +++ b/crates/op-rbuilder/src/tests/gas_limiter.rs @@ -23,9 +23,7 @@ async fn gas_limiter_blocks_excessive_usage(rbuilder: LocalInstance) -> eyre::Re let driver = rbuilder.driver().await?; // Fund some accounts for testing - let funded_accounts = driver - .fund_accounts(2, 10_000_000_000_000_000_000u128) - .await?; // 10 ETH each + let funded_accounts = driver.fund_accounts(2, 10_000_000_000_000_000_000u128).await?; // 10 ETH each // These transactions should not be throttled let tx1 = driver @@ -49,7 +47,8 @@ async fn gas_limiter_blocks_excessive_usage(rbuilder: LocalInstance) -> eyre::Re assert!(tx_hashes.contains(tx1.tx_hash()), "tx1 should be included"); assert!(tx_hashes.contains(tx2.tx_hash()), "tx2 should be included"); - // Send multiple big transactions from the same address - these should hit the gas limiter + // Send multiple big transactions from the same address - these should hit the + // gas limiter let mut sent_txs = Vec::new(); for i in 0..5 { let big_tx = driver @@ -59,11 +58,7 @@ async fn gas_limiter_blocks_excessive_usage(rbuilder: LocalInstance) -> eyre::Re .send() .await?; sent_txs.push(*big_tx.tx_hash()); - info!( - "Sent big transaction {} from address {}", - i + 1, - funded_accounts[0].address - ); + info!("Sent big transaction {} from address {}", i + 1, funded_accounts[0].address); } // Meanwhile, the other address should not be throttled @@ -87,10 +82,7 @@ async fn gas_limiter_blocks_excessive_usage(rbuilder: LocalInstance) -> eyre::Re "Gas limiter should have rejected some transactions, included: {}/5", included_count ); - assert!( - included_count > 0, - "Gas limiter should have allowed at least one transaction" - ); + 
assert!(included_count > 0, "Gas limiter should have allowed at least one transaction"); assert!( tx_hashes.contains(legit_tx.tx_hash()), diff --git a/crates/op-rbuilder/src/tests/ordering.rs b/crates/op-rbuilder/src/tests/ordering.rs index bba31b0ef..fe26921dc 100644 --- a/crates/op-rbuilder/src/tests/ordering.rs +++ b/crates/op-rbuilder/src/tests/ordering.rs @@ -3,10 +3,11 @@ use alloy_consensus::Transaction; use futures::{StreamExt, future::join_all, stream}; use macros::rb_test; -/// This test ensures that the transactions are ordered by fee priority in the block. -/// This version of the test is only applicable to the standard builder because in flashblocks -/// the transaction order is commited by the block after each flashblock is produced, -/// so the order is only going to hold within one flashblock, but not the entire block. +/// This test ensures that the transactions are ordered by fee priority in the +/// block. This version of the test is only applicable to the standard builder +/// because in flashblocks the transaction order is committed by the block after +/// each flashblock is produced, so the order is only going to hold within one +/// flashblock, but not the entire block. 
#[rb_test(standard)] async fn fee_priority_ordering(rbuilder: LocalInstance) -> eyre::Result<()> { let driver = rbuilder.driver().await?; @@ -63,10 +64,7 @@ async fn fee_priority_ordering(rbuilder: LocalInstance) -> eyre::Result<()> { .rev() // we want to check descending order .collect::>(); - assert!( - txs_tips.is_sorted(), - "Transactions not ordered by fee priority" - ); + assert!(txs_tips.is_sorted(), "Transactions not ordered by fee priority"); Ok(()) } diff --git a/crates/op-rbuilder/src/tests/revert.rs b/crates/op-rbuilder/src/tests/revert.rs index 76e1c5b9b..7ac85ed1a 100644 --- a/crates/op-rbuilder/src/tests/revert.rs +++ b/crates/op-rbuilder/src/tests/revert.rs @@ -11,9 +11,9 @@ use crate::{ }, }; -/// This test ensures that the transactions that get reverted and not included in the block, -/// are eventually dropped from the pool once their block range is reached. -/// This test creates N transactions with different block ranges. +/// This test ensures that the transactions that get reverted and not included +/// in the block, are eventually dropped from the pool once their block range is +/// reached. This test creates N transactions with different block ranges. #[rb_test(args = OpRbuilderArgs { enable_revert_protection: true, ..Default::default() @@ -53,9 +53,10 @@ async fn monitor_transaction_gc(rbuilder: LocalInstance) -> eyre::Result<()> { assert_eq!(generated_block.transactions.len(), 3); } - // since we created the 10 transactions with increasing block ranges, as we generate blocks - // one transaction will be gc on each block. - // transactions from [0, i] should be dropped, transactions from [i+1, 10] should be queued + // since we created the 10 transactions with increasing block ranges, as we + // generate blocks one transaction will be gc on each block. 
+ // transactions from [0, i] should be dropped, transactions from [i+1, 10] + // should be queued for j in 0..=i { assert!(rbuilder.pool().is_dropped(*pending_txn[j].tx_hash())); } @@ -67,23 +68,17 @@ async fn monitor_transaction_gc(rbuilder: LocalInstance) -> eyre::Result<()> { Ok(()) } -/// If revert protection is disabled, the transactions that revert are included in the block. +/// If revert protection is disabled, the transactions that revert are included +/// in the block. #[rb_test] async fn disabled(rbuilder: LocalInstance) -> eyre::Result<()> { let driver = rbuilder.driver().await?; for _ in 0..10 { - let valid_tx = driver - .create_transaction() - .random_valid_transfer() - .send() - .await?; + let valid_tx = driver.create_transaction().random_valid_transfer().send().await?; - let reverting_tx = driver - .create_transaction() - .random_reverting_transaction() - .send() - .await?; + let reverting_tx = + driver.create_transaction().random_reverting_transaction().send().await?; let block = driver.build_new_block().await?; assert!(block.includes(valid_tx.tx_hash())); @@ -93,29 +88,22 @@ async fn disabled(rbuilder: LocalInstance) -> eyre::Result<()> { Ok(()) } -/// If revert protection is disabled, it should not be possible to send a revert bundle -/// since the revert RPC endpoint is not available. +/// If revert protection is disabled, it should not be possible to send a revert +/// bundle since the revert RPC endpoint is not available. 
#[rb_test] async fn disabled_bundle_endpoint_error(rbuilder: LocalInstance) -> eyre::Result<()> { let driver = rbuilder.driver().await?; - let res = driver - .create_transaction() - .with_bundle(BundleOpts::default()) - .send() - .await; + let res = driver.create_transaction().with_bundle(BundleOpts::default()).send().await; - assert!( - res.is_err(), - "Expected error because method is not available" - ); + assert!(res.is_err(), "Expected error because method is not available"); Ok(()) } -/// Test the behaviour of the revert protection bundle, if the bundle **does not** revert -/// the transaction is included in the block. If the bundle reverts, the transaction -/// is not included in the block and tried again for the next bundle range blocks -/// when it will be dropped from the pool. +/// Test the behaviour of the revert protection bundle, if the bundle **does +/// not** revert the transaction is included in the block. If the bundle +/// reverts, the transaction is not included in the block and tried again for +/// the next bundle range blocks when it will be dropped from the pool. #[rb_test(args = OpRbuilderArgs { enable_revert_protection: true, ..Default::default() @@ -133,12 +121,7 @@ async fn bundle(rbuilder: LocalInstance) -> eyre::Result<()> { .await?; let block2 = driver.build_new_block().await?; // Block 2 - assert!( - block2 - .transactions - .hashes() - .includes(valid_bundle.tx_hash()) - ); + assert!(block2.transactions.hashes().includes(valid_bundle.tx_hash())); let bundle_opts = BundleOpts::default().with_block_number_max(4); @@ -156,7 +139,8 @@ async fn bundle(rbuilder: LocalInstance) -> eyre::Result<()> { // After the block the transaction is still pending in the pool assert!(rbuilder.pool().is_pending(*reverted_bundle.tx_hash())); - // Test 3: Chain progresses beyond the bundle range. The transaction is dropped from the pool + // Test 3: Chain progresses beyond the bundle range. 
The transaction is dropped + // from the pool driver.build_new_block().await?; // Block 4 assert!(rbuilder.pool().is_dropped(*reverted_bundle.tx_hash())); @@ -194,11 +178,7 @@ async fn bundle_min_block_number(rbuilder: LocalInstance) -> eyre::Result<()> { .create_transaction() .with_revert() .with_reverted_hash() - .with_bundle( - BundleOpts::default() - .with_block_number_max(4) - .with_block_number_min(4), - ) + .with_bundle(BundleOpts::default().with_block_number_max(4).with_block_number_min(4)) .send() .await?; @@ -220,7 +200,8 @@ async fn bundle_min_timestamp(rbuilder: LocalInstance) -> eyre::Result<()> { let driver = rbuilder.driver().await?; let initial_timestamp = driver.latest().await?.header.timestamp; - // The bundle is valid when the min timestamp is equal to the current block's timestamp + // The bundle is valid when the min timestamp is equal to the current block's + // timestamp let bundle_with_min_timestamp = driver .create_transaction() .with_revert() // the transaction reverts but it is included in the block @@ -229,7 +210,8 @@ async fn bundle_min_timestamp(rbuilder: LocalInstance) -> eyre::Result<()> { .send() .await?; - // Each block advances the timestamp by block_time_secs which is 1 when chain_block_time isn't set + // Each block advances the timestamp by block_time_secs which is 1 when + // chain_block_time isn't set let block = driver.build_new_block().await?; // Block 1, initial_timestamp + 1 assert!(!block.includes(bundle_with_min_timestamp.tx_hash())); @@ -257,22 +239,15 @@ async fn bundle_range_limits(rbuilder: LocalInstance) -> eyre::Result<()> { } // Max block cannot be a past block - assert!( - send_bundle(&driver, BundleOpts::default().with_block_number_max(1)) - .await - .is_err() - ); + assert!(send_bundle(&driver, BundleOpts::default().with_block_number_max(1)).await.is_err()); - // Bundles are valid if their max block in in between the current block and the max block range + // Bundles are valid if their max block in in between 
the current block and the + // max block range let current_block = 2; let next_valid_block = current_block + 1; for i in next_valid_block..next_valid_block + MAX_BLOCK_RANGE_BLOCKS { - assert!( - send_bundle(&driver, BundleOpts::default().with_block_number_max(i)) - .await - .is_ok() - ); + assert!(send_bundle(&driver, BundleOpts::default().with_block_number_max(i)).await.is_ok()); } // A bundle with a block out of range is invalid @@ -290,9 +265,7 @@ async fn bundle_range_limits(rbuilder: LocalInstance) -> eyre::Result<()> { assert!( send_bundle( &driver, - BundleOpts::default() - .with_block_number_max(1) - .with_block_number_min(2) + BundleOpts::default().with_block_number_max(1).with_block_number_min(2) ) .await .is_err() @@ -310,12 +283,9 @@ async fn bundle_range_limits(rbuilder: LocalInstance) -> eyre::Result<()> { .is_ok() ); assert!( - send_bundle( - &driver, - BundleOpts::default().with_block_number_max(next_valid_block) - ) - .await - .is_ok() + send_bundle(&driver, BundleOpts::default().with_block_number_max(next_valid_block)) + .await + .is_ok() ); // A bundle with a min block equal to max block is valid @@ -334,46 +304,34 @@ async fn bundle_range_limits(rbuilder: LocalInstance) -> eyre::Result<()> { // A bundle with only min block that's within the default range is valid let default_max = current_block + MAX_BLOCK_RANGE_BLOCKS; assert!( - send_bundle( - &driver, - BundleOpts::default().with_block_number_min(current_block) - ) - .await - .is_ok() + send_bundle(&driver, BundleOpts::default().with_block_number_min(current_block)) + .await + .is_ok() ); assert!( - send_bundle( - &driver, - BundleOpts::default().with_block_number_min(default_max - 1) - ) - .await - .is_ok() + send_bundle(&driver, BundleOpts::default().with_block_number_min(default_max - 1)) + .await + .is_ok() ); assert!( - send_bundle( - &driver, - BundleOpts::default().with_block_number_min(default_max) - ) - .await - .is_ok() + send_bundle(&driver, 
BundleOpts::default().with_block_number_min(default_max)) + .await + .is_ok() ); // A bundle with only min block that exceeds the default max range is invalid assert!( - send_bundle( - &driver, - BundleOpts::default().with_block_number_min(default_max + 1) - ) - .await - .is_err() + send_bundle(&driver, BundleOpts::default().with_block_number_min(default_max + 1)) + .await + .is_err() ); Ok(()) } -/// If a transaction reverts and was sent as a normal transaction through the eth_sendRawTransaction -/// bundle, the transaction should be included in the block. -/// This behaviour is the same as the 'disabled' test. +/// If a transaction reverts and was sent as a normal transaction through the +/// eth_sendRawTransaction bundle, the transaction should be included in the +/// block. This behaviour is the same as the 'disabled' test. #[rb_test(args = OpRbuilderArgs { enable_revert_protection: true, ..Default::default() @@ -382,16 +340,9 @@ async fn allow_reverted_transactions_without_bundle(rbuilder: LocalInstance) -> let driver = rbuilder.driver().await?; for _ in 0..10 { - let valid_tx = driver - .create_transaction() - .random_valid_transfer() - .send() - .await?; - let reverting_tx = driver - .create_transaction() - .random_reverting_transaction() - .send() - .await?; + let valid_tx = driver.create_transaction().random_valid_transfer().send().await?; + let reverting_tx = + driver.create_transaction().random_reverting_transaction().send().await?; let block = driver.build_new_block().await?; assert!(block.includes(valid_tx.tx_hash())); @@ -401,8 +352,8 @@ async fn allow_reverted_transactions_without_bundle(rbuilder: LocalInstance) -> Ok(()) } -/// If a transaction reverts and gets dropped it, the eth_getTransactionReceipt should return -/// an error message that it was dropped. +/// If a transaction reverts and gets dropped it, the eth_getTransactionReceipt +/// should return an error message that it was dropped. 
#[rb_test(args = OpRbuilderArgs { enable_revert_protection: true, ..OpRbuilderArgs::test_default() diff --git a/crates/op-rbuilder/src/tests/smoke.rs b/crates/op-rbuilder/src/tests/smoke.rs index 63cfa77b7..e98b46df6 100644 --- a/crates/op-rbuilder/src/tests/smoke.rs +++ b/crates/op-rbuilder/src/tests/smoke.rs @@ -23,9 +23,7 @@ async fn chain_produces_blocks(rbuilder: LocalInstance) -> eyre::Result<()> { let driver = rbuilder.driver().await?; #[cfg(target_os = "linux")] - let driver = driver - .with_validation_node(crate::tests::ExternalNode::reth().await?) - .await?; + let driver = driver.with_validation_node(crate::tests::ExternalNode::reth().await?).await?; const SAMPLE_SIZE: usize = 10; @@ -55,8 +53,9 @@ async fn chain_produces_blocks(rbuilder: LocalInstance) -> eyre::Result<()> { } } - // ensure that transactions are included in blocks and each block has all the transactions - // sent to it during its block time + the two mandatory transactions + // ensure that transactions are included in blocks and each block has all the + // transactions sent to it during its block time + the two mandatory + // transactions for _ in 0..SAMPLE_SIZE { let count = rand::random_range(1..8); let mut tx_hashes = HashSet::::default(); @@ -119,12 +118,7 @@ async fn produces_blocks_under_load_within_deadline(rbuilder: LocalInstance) -> async { // Keep the builder busy with new transactions. loop { - match driver - .create_transaction() - .random_valid_transfer() - .send() - .await - { + match driver.create_transaction().random_valid_transfer().send().await { Ok(_) => {} Err(e) if e.to_string().contains("txpool is full") => { // If the txpool is full, give it a short break @@ -151,8 +145,8 @@ async fn produces_blocks_under_load_within_deadline(rbuilder: LocalInstance) -> // Ensure that the builder can still produce blocks under // heavy load of incoming transactions. 
let block = tokio::time::timeout( - Duration::from_secs(rbuilder.args().chain_block_time) - + Duration::from_millis(500), + Duration::from_secs(rbuilder.args().chain_block_time) + + Duration::from_millis(500), driver.build_new_block_with_current_timestamp(None), ) .await @@ -202,9 +196,7 @@ async fn chain_produces_big_tx_with_gas_limit(rbuilder: LocalInstance) -> eyre:: let driver = rbuilder.driver().await?; #[cfg(target_os = "linux")] - let driver = driver - .with_validation_node(crate::tests::ExternalNode::reth().await?) - .await?; + let driver = driver.with_validation_node(crate::tests::ExternalNode::reth().await?).await?; // insert valid txn under limit let tx = driver @@ -259,9 +251,7 @@ async fn chain_produces_big_tx_without_gas_limit(rbuilder: LocalInstance) -> eyr let driver = rbuilder.driver().await?; #[cfg(target_os = "linux")] - let driver = driver - .with_validation_node(crate::tests::ExternalNode::reth().await?) - .await?; + let driver = driver.with_validation_node(crate::tests::ExternalNode::reth().await?).await?; // insert txn with gas usage but there is no limit let tx = driver diff --git a/crates/op-rbuilder/src/tests/txpool.rs b/crates/op-rbuilder/src/tests/txpool.rs index 235ccd268..7581a3da6 100644 --- a/crates/op-rbuilder/src/tests/txpool.rs +++ b/crates/op-rbuilder/src/tests/txpool.rs @@ -6,7 +6,8 @@ use reth::args::TxPoolArgs; use reth_node_builder::NodeConfig; use reth_optimism_chainspec::OpChainSpec; -/// This test ensures that pending pool custom limit is respected and priority tx would be included even when pool if full. +/// This test ensures that pending pool custom limit is respected and priority +/// tx would be included even when pool is full. 
#[rb_test( config = NodeConfig:: { txpool: TxPoolArgs { @@ -26,11 +27,7 @@ async fn pending_pool_limit(rbuilder: LocalInstance) -> eyre::Result<()> { let acc_with_priority = accounts.last().unwrap(); for _ in 0..50 { - let _ = driver - .create_transaction() - .with_signer(*acc_no_priority) - .send() - .await?; + let _ = driver.create_transaction().with_signer(*acc_no_priority).send().await?; } assert_eq!( @@ -59,7 +56,8 @@ async fn pending_pool_limit(rbuilder: LocalInstance) -> eyre::Result<()> { rbuilder.pool().pending_count() ); - // After we try building block our reverting tx would be removed and other tx will move to queue pool + // After we try building block our reverting tx would be removed and other tx + // will move to queue pool let block = driver.build_new_block().await?; // Ensure that 10 extra txs got included diff --git a/crates/op-rbuilder/src/tx.rs b/crates/op-rbuilder/src/tx.rs index 64f7e43f5..2848b6426 100644 --- a/crates/op-rbuilder/src/tx.rs +++ b/crates/op-rbuilder/src/tx.rs @@ -233,9 +233,7 @@ impl EthPoolTransaction for FBPooledTransaction { _sidecar: &BlobTransactionSidecarVariant, _settings: &KzgSettings, ) -> Result<(), BlobTransactionValidationError> { - Err(BlobTransactionValidationError::NotBlobTransaction( - self.ty(), - )) + Err(BlobTransactionValidationError::NotBlobTransaction(self.ty())) } } diff --git a/crates/op-rbuilder/src/tx_signer.rs b/crates/op-rbuilder/src/tx_signer.rs index 71ef7a4b5..9a8208577 100644 --- a/crates/op-rbuilder/src/tx_signer.rs +++ b/crates/op-rbuilder/src/tx_signer.rs @@ -23,11 +23,7 @@ impl Signer { let pubkey = secret.public_key(SECP256K1); let address = public_key_to_address(&pubkey); - Ok(Self { - address, - pubkey, - secret, - }) + Ok(Self { address, pubkey, secret }) } pub fn sign_message(&self, message: B256) -> Result { @@ -140,23 +136,12 @@ mod test { let pubkey_bytes = public_key.serialize_uncompressed(); // Verify the public key format - assert_eq!( - pubkey_bytes.len(), - 65, - "Uncompressed 
public key should be 65 bytes" - ); - assert_eq!( - pubkey_bytes[0], 0x04, - "Uncompressed public key should start with 0x04" - ); + assert_eq!(pubkey_bytes.len(), 65, "Uncompressed public key should be 65 bytes"); + assert_eq!(pubkey_bytes[0], 0x04, "Uncompressed public key should start with 0x04"); // Verify report data would be 64 bytes let report_data = &pubkey_bytes[1..65]; - assert_eq!( - report_data.len(), - 64, - "Report data should be exactly 64 bytes" - ); + assert_eq!(report_data.len(), 64, "Report data should be exactly 64 bytes"); } #[test] @@ -169,9 +154,6 @@ mod test { let address1 = public_key_to_address(&public_key); let address2 = public_key_to_address(&public_key); - assert_eq!( - address1, address2, - "Address derivation should be deterministic" - ); + assert_eq!(address1, address2, "Address derivation should be deterministic"); } } diff --git a/crates/tdx-quote-provider/src/lib.rs b/crates/tdx-quote-provider/src/lib.rs index 3aad8c3a4..fec555636 100644 --- a/crates/tdx-quote-provider/src/lib.rs +++ b/crates/tdx-quote-provider/src/lib.rs @@ -1,6 +1,7 @@ //! TDX Quote Provider //! -//! This crate provides functionality for generating and managing TDX attestation quotes. +//! This crate provides functionality for generating and managing TDX +//! attestation quotes. 
pub mod metrics; pub mod provider; diff --git a/crates/tdx-quote-provider/src/main.rs b/crates/tdx-quote-provider/src/main.rs index a820ce976..a9c74f97e 100644 --- a/crates/tdx-quote-provider/src/main.rs +++ b/crates/tdx-quote-provider/src/main.rs @@ -77,16 +77,12 @@ async fn main() -> eyre::Result<()> { metrics_addr.parse().expect("invalid metrics address"); let builder = PrometheusBuilder::new().with_http_listener(socket_addr); - builder - .install() - .expect("failed to setup Prometheus endpoint") + builder.install().expect("failed to setup Prometheus endpoint") } // Start the server let server = Server::new(ServerConfig { - listen_addr: format!("{}:{}", args.service_host, args.service_port) - .parse() - .unwrap(), + listen_addr: format!("{}:{}", args.service_host, args.service_port).parse().unwrap(), use_mock: args.mock, mock_attestation_path: args.mock_attestation_path, }); diff --git a/crates/tdx-quote-provider/src/provider.rs b/crates/tdx-quote-provider/src/provider.rs index 0479edf8a..af16a668e 100644 --- a/crates/tdx-quote-provider/src/provider.rs +++ b/crates/tdx-quote-provider/src/provider.rs @@ -60,9 +60,7 @@ pub struct MockAttestationProvider { impl MockAttestationProvider { pub fn new(mock_attestation_path: String) -> Self { - Self { - mock_attestation_path, - } + Self { mock_attestation_path } } } @@ -76,8 +74,7 @@ impl AttestationProvider for MockAttestationProvider { let mut file = File::open(self.mock_attestation_path.clone()) .map_err(AttestationError::ReadMockAttestationFailed)?; let mut buffer = Vec::new(); - file.read_to_end(&mut buffer) - .map_err(AttestationError::ReadMockAttestationFailed)?; + file.read_to_end(&mut buffer).map_err(AttestationError::ReadMockAttestationFailed)?; Ok(buffer) } } diff --git a/crates/tdx-quote-provider/src/server.rs b/crates/tdx-quote-provider/src/server.rs index b8d5e2955..9123806f4 100644 --- a/crates/tdx-quote-provider/src/server.rs +++ b/crates/tdx-quote-provider/src/server.rs @@ -61,35 +61,22 @@ impl Server 
{ mock_attestation_path: config.mock_attestation_path.clone(), }; let quote_provider = get_attestation_provider(attestation_config); - Self { - quote_provider, - metrics: Metrics::default(), - config, - } + Self { quote_provider, metrics: Metrics::default(), config } } pub async fn listen(self) -> eyre::Result<()> { let router = Router::new() .route("/healthz", get(healthz_handler)) .route("/attest/{appdata}", get(attest_handler)) - .with_state(ServerState { - quote_provider: self.quote_provider, - metrics: self.metrics, - }); + .with_state(ServerState { quote_provider: self.quote_provider, metrics: self.metrics }); let listener = TcpListener::bind(self.config.listen_addr).await?; - info!( - message = "starting server", - address = listener.local_addr()?.to_string() - ); - - axum::serve( - listener, - router.into_make_service_with_connect_info::(), - ) - .with_graceful_shutdown(shutdown_signal()) - .await - .map_err(|e| eyre::eyre!("failed to start server: {}", e)) + info!(message = "starting server", address = listener.local_addr()?.to_string()); + + axum::serve(listener, router.into_make_service_with_connect_info::()) + .with_graceful_shutdown(shutdown_signal()) + .await + .map_err(|e| eyre::eyre!("failed to start server: {}", e)) } } @@ -137,10 +124,7 @@ async fn attest_handler( match server.quote_provider.get_attestation(report_data) { Ok(attestation) => { let duration = start.elapsed(); - server - .metrics - .attest_duration - .record(duration.as_secs_f64()); + server.metrics.attest_duration.record(duration.as_secs_f64()); Response::builder() .status(StatusCode::OK) @@ -153,9 +137,7 @@ async fn attest_handler( tracing::error!(target: "tdx_quote_provider", error = %e, "Failed to get TDX attestation"); Response::builder() .status(StatusCode::INTERNAL_SERVER_ERROR) - .body(Body::from( - json!({"message": "Failed to get TDX attestation"}).to_string(), - )) + .body(Body::from(json!({"message": "Failed to get TDX attestation"}).to_string())) .unwrap() 
.into_response() } diff --git a/crates/tdx-quote-provider/tests/simple.rs b/crates/tdx-quote-provider/tests/simple.rs index afa060286..e6cd54c75 100644 --- a/crates/tdx-quote-provider/tests/simple.rs +++ b/crates/tdx-quote-provider/tests/simple.rs @@ -93,10 +93,7 @@ async fn test_mock_attest() { let response = harness.attest(report_data).await; assert!(response.is_ok()); let body = response.unwrap(); - assert_eq!( - body, - Bytes::from_static(include_bytes!("./test_data/quote.bin")) - ); + assert_eq!(body, Bytes::from_static(include_bytes!("./test_data/quote.bin"))); } #[tokio::test] diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 7855e6d55..70e1fb557 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,3 +1,4 @@ [toolchain] channel = "1.88.0" components = ["rustfmt", "clippy"] + diff --git a/rustfmt.toml b/rustfmt.toml index 648e8368f..54d490932 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1,2 +1,12 @@ reorder_imports = true imports_granularity = "Crate" +use_small_heuristics = "Max" +comment_width = 100 +wrap_comments = true +binop_separator = "Back" +trailing_comma = "Vertical" +trailing_semicolon = false +use_field_init_shorthand = true +format_code_in_doc_comments = true +doc_comment_code_block_width = 100 +