From c6875b6f7b25c3b892bcb70c341eb33cf6fd01fc Mon Sep 17 00:00:00 2001 From: Esteban Dimitroff Hodi Date: Thu, 5 Jun 2025 17:39:19 -0300 Subject: [PATCH 01/40] Initial RLPxConnection refactor to use spawned --- Cargo.lock | 276 ++++--- Cargo.toml | 2 + crates/l2/Cargo.toml | 4 +- crates/networking/p2p/Cargo.toml | 2 + crates/networking/p2p/discv4/server.rs | 18 +- crates/networking/p2p/network.rs | 52 +- crates/networking/p2p/rlpx.rs | 2 - crates/networking/p2p/rlpx/connection.rs | 652 ---------------- .../rlpx/{frame.rs => connection/codec.rs} | 11 +- .../p2p/rlpx/{ => connection}/handshake.rs | 69 +- crates/networking/p2p/rlpx/connection/mod.rs | 3 + .../networking/p2p/rlpx/connection/server.rs | 712 ++++++++++++++++++ crates/networking/p2p/rlpx/error.rs | 2 +- crates/networking/p2p/rlpx/eth/receipts.rs | 4 +- crates/networking/p2p/rlpx/eth/status.rs | 2 +- .../networking/p2p/rlpx/eth/transactions.rs | 6 +- crates/networking/p2p/rlpx/message.rs | 2 +- crates/networking/p2p/rlpx/p2p.rs | 8 +- crates/networking/p2p/rlpx/snap.rs | 22 +- crates/networking/p2p/rlpx/utils.rs | 4 +- 20 files changed, 978 insertions(+), 875 deletions(-) delete mode 100644 crates/networking/p2p/rlpx/connection.rs rename crates/networking/p2p/rlpx/{frame.rs => connection/codec.rs} (98%) rename crates/networking/p2p/rlpx/{ => connection}/handshake.rs (93%) create mode 100644 crates/networking/p2p/rlpx/connection/mod.rs create mode 100644 crates/networking/p2p/rlpx/connection/server.rs diff --git a/Cargo.lock b/Cargo.lock index aae9c3a76c..c07c8c3e43 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -49,7 +49,7 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "cipher", "cpufeatures", ] @@ -71,7 +71,7 @@ version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "once_cell", "version_check", "zerocopy 0.7.35", @@ -356,7 +356,7 @@ checksum = "ccb3ead547f4532bc8af961649942f0b9c16ee9226e26caa3f38420651cc0bf4" dependencies = [ "alloy-rlp", "bytes", - "cfg-if", + "cfg-if 1.0.0", "const-hex", "derive_more 0.99.19", "hex-literal", @@ -378,7 +378,7 @@ checksum = "8c77490fe91a0ce933a1f219029521f20fc28c2c0ca95d53fa4da9c00b8d9d4e" dependencies = [ "alloy-rlp", "bytes", - "cfg-if", + "cfg-if 1.0.0", "const-hex", "derive_more 2.0.1", "foldhash", @@ -1311,7 +1311,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line 0.24.2", - "cfg-if", + "cfg-if 1.0.0", "libc", "miniz_oxide", "object 0.36.7", @@ -1463,7 +1463,7 @@ dependencies = [ "arrayref", "arrayvec", "cc", - "cfg-if", + "cfg-if 1.0.0", "constant_time_eq", ] @@ -1714,6 +1714,12 @@ dependencies = [ "nom", ] +[[package]] +name = "cfg-if" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" + [[package]] name = "cfg-if" version = "1.0.0" @@ -1928,7 +1934,7 @@ version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b0485bab839b018a8f1723fc5391819fea5f8f0f32288ef8a735fd096b6160c" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "cpufeatures", "hex", "proptest", @@ -2025,7 +2031,7 @@ version = "0.4.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "96e58d342ad113c2b878f16d5d034c03be492ae460cdbc02b7f0f2284d310c7d" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", ] [[package]] @@ -2068,7 +2074,7 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", ] [[package]] @@ -2107,17 +2113,41 @@ dependencies = [ "itertools 0.10.5", ] +[[package]] +name = "crossbeam" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69323bff1fb41c635347b8ead484a5ca6c3f11914d784170b158d8449ab07f8e" +dependencies = [ + "cfg-if 0.1.10", + "crossbeam-channel 0.4.4", + "crossbeam-deque 0.7.4", + "crossbeam-epoch 0.8.2", + "crossbeam-queue 0.2.3", + "crossbeam-utils 0.7.2", +] + [[package]] name = "crossbeam" version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8" dependencies = [ - "crossbeam-channel", - "crossbeam-deque", - "crossbeam-epoch", - "crossbeam-queue", - "crossbeam-utils", + "crossbeam-channel 0.5.15", + "crossbeam-deque 0.8.6", + "crossbeam-epoch 0.9.18", + "crossbeam-queue 0.3.12", + "crossbeam-utils 0.8.21", +] + +[[package]] +name = "crossbeam-channel" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87" +dependencies = [ + "crossbeam-utils 0.7.2", + "maybe-uninit", ] [[package]] @@ -2126,7 +2156,18 @@ version = "0.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" dependencies = [ - "crossbeam-utils", + "crossbeam-utils 0.8.21", +] + +[[package]] +name = "crossbeam-deque" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c20ff29ded3204c5106278a81a38f4b482636ed4fa1e6cfbeef193291beb29ed" +dependencies = [ + "crossbeam-epoch 0.8.2", + "crossbeam-utils 0.7.2", + "maybe-uninit", ] [[package]] @@ -2135,8 +2176,23 @@ version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" dependencies = [ - "crossbeam-epoch", - "crossbeam-utils", + "crossbeam-epoch 0.9.18", + "crossbeam-utils 0.8.21", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" +dependencies = [ + "autocfg", + "cfg-if 0.1.10", + "crossbeam-utils 0.7.2", + "lazy_static", + "maybe-uninit", + "memoffset", + "scopeguard", ] [[package]] @@ -2145,7 +2201,18 @@ version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ - "crossbeam-utils", + "crossbeam-utils 0.8.21", +] + +[[package]] +name = "crossbeam-queue" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" +dependencies = [ + "cfg-if 0.1.10", + "crossbeam-utils 0.7.2", + "maybe-uninit", ] [[package]] @@ -2154,7 +2221,18 @@ version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115" dependencies = [ - "crossbeam-utils", + "crossbeam-utils 0.8.21", +] + +[[package]] +name = "crossbeam-utils" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" +dependencies = [ + "autocfg", + "cfg-if 0.1.10", + "lazy_static", ] [[package]] @@ -2247,7 +2325,7 @@ version = "4.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "cpufeatures", "curve25519-dalek-derive", "fiat-crypto", @@ -2354,7 +2432,7 @@ version = "4.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e77a43b28d0668df09411cb0bc9a8c2adc40f9a048afe863e05fd43251e8e39c" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "num_cpus", "serde", ] @@ -2365,7 +2443,7 @@ version = "5.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "hashbrown 0.14.5", "lock_api", "once_cell", @@ -2378,8 +2456,8 @@ version = "6.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" dependencies = [ - "cfg-if", - "crossbeam-utils", + "cfg-if 1.0.0", + "crossbeam-utils 0.8.21", "hashbrown 0.14.5", "lock_api", "once_cell", @@ -2426,7 +2504,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee99d08031ca34a4d044efbbb21dff9b8c54bb9d8c82a189187c0651ffdb9fbf" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "dashu-base", "num-modular", "num-order", @@ -2679,7 +2757,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "dirs-sys-next", ] @@ -2878,7 +2956,7 @@ version = "0.8.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", ] [[package]] @@ -3032,7 +3110,7 @@ version = "0.1.0" dependencies = [ "anyhow", "bytes", - "cfg-if", + "cfg-if 1.0.0", "clap 4.5.36", "clap_complete", "criterion", @@ -3072,7 +3150,7 @@ name = "ethrex-blockchain" version = "0.1.0" dependencies = [ "bytes", - "cfg-if", + "cfg-if 1.0.0", "ethrex-common", "ethrex-metrics", "ethrex-rlp", @@ -3240,6 +3318,8 @@ dependencies = [ "serde_json", "sha3", "snap", + "spawned-concurrency", + "spawned-rt", "thiserror 2.0.12", "tokio", "tokio-stream", @@ -3328,7 +3408,7 @@ dependencies = [ "axum 0.8.3", "axum-extra", "bytes", - "cfg-if", + "cfg-if 1.0.0", "envy", "ethrex-blockchain", "ethrex-common", @@ -3454,7 +3534,7 @@ version = "0.1.0" dependencies = [ "bincode", "bytes", - "cfg-if", + "cfg-if 1.0.0", "derive_more 1.0.0", "dyn-clone", "ethereum-types", @@ -3876,7 +3956,7 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] @@ -3887,7 +3967,7 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", @@ -3900,7 +3980,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "js-sys", "libc", "r-efi", @@ -4048,7 +4128,7 @@ version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "crunchy", ] @@ -4627,7 +4707,7 @@ version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d89fd380afde86567dfba715db065673989d6253f42b88179abd3eae47bda4b" dependencies = [ - "crossbeam-deque", + "crossbeam-deque 0.8.6", "globset", "log", "memchr", @@ -4746,7 +4826,7 @@ version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", ] [[package]] @@ -4902,7 +4982,7 @@ version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "ecdsa", "elliptic-curve", "once_cell", @@ -5019,7 +5099,7 @@ version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "windows-targets 0.52.6", ] @@ -5307,6 +5387,12 @@ dependencies = [ "syn 2.0.100", ] +[[package]] +name = "maybe-uninit" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" + [[package]] name = "mdbx-sys" version = "12.12.0" @@ -5333,6 +5419,15 @@ dependencies = [ "libc", ] +[[package]] +name = "memoffset" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" +dependencies = [ + "autocfg", +] + [[package]] name = "memuse" version = "0.2.2" @@ -5457,7 +5552,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ "bitflags 2.9.0", - "cfg-if", + "cfg-if 1.0.0", "cfg_aliases", "libc", ] @@ -5779,7 +5874,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fedfea7d58a1f73118430a55da6a286e7b044961736ce96a16a17068ea25e5da" dependencies = [ "bitflags 2.9.0", - "cfg-if", + "cfg-if 1.0.0", "foreign-types 0.3.2", "libc", "once_cell", @@ -6510,7 +6605,7 @@ version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "instant", "libc", "redox_syscall 0.2.16", @@ -6524,7 +6619,7 @@ version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "libc", "redox_syscall 0.5.11", "smallvec", @@ -6758,7 +6853,7 @@ source = "git+https://github.com/brevis-network/pico?rev=286feb9878d9e09347852e5 dependencies = 
[ "anyhow", "bincode", - "cfg-if", + "cfg-if 1.0.0", "env_logger 0.11.8", "getrandom 0.2.15", "hex", @@ -6784,7 +6879,7 @@ source = "git+https://github.com/brevis-network/pico#ece7e7e8401e7907924bf829503 dependencies = [ "anyhow", "bincode", - "cfg-if", + "cfg-if 1.0.0", "env_logger 0.11.8", "getrandom 0.2.15", "hex", @@ -6813,10 +6908,10 @@ dependencies = [ "backtrace", "bincode", "bytemuck", - "cfg-if", + "cfg-if 1.0.0", "clap 4.5.36", "cpu-time", - "crossbeam", + "crossbeam 0.8.4", "csv", "curve25519-dalek", "dashmap 6.1.0", @@ -6897,10 +6992,10 @@ dependencies = [ "backtrace", "bincode", "bytemuck", - "cfg-if", + "cfg-if 1.0.0", "clap 4.5.36", "cpu-time", - "crossbeam", + "crossbeam 0.8.4", "csv", "curve25519-dalek", "dashmap 6.1.0", @@ -7214,7 +7309,7 @@ version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d33c28a30771f7f96db69893f78b857f7450d7e0237e9c8fc6427a81bae7ed1" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "fnv", "lazy_static", "memchr", @@ -7280,7 +7375,7 @@ checksum = "fa9dae7b05c02ec1a6bc9bcf20d8bc64a7dcbf57934107902a872014899b741f" dependencies = [ "anyhow", "byteorder", - "cfg-if", + "cfg-if 1.0.0", "itertools 0.10.5", "once_cell", "parking_lot 0.12.3", @@ -7540,8 +7635,8 @@ version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" dependencies = [ - "crossbeam-deque", - "crossbeam-utils", + "crossbeam-deque 0.8.6", + "crossbeam-utils 0.8.21", ] [[package]] @@ -7727,7 +7822,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3a2c336f9921588e50871c00024feb51a521eca50ce6d01494bb9c50f837c8ed" dependencies = [ "auto_impl", - "cfg-if", + "cfg-if 1.0.0", "dyn-clone", "revm-interpreter 5.0.0", "revm-precompile 7.0.0", @@ -7742,7 +7837,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c175ecec83bba464aa8406502fe5bf670491c2ace81a153264891d43bc7fa332" dependencies = [ "auto_impl", - "cfg-if", + "cfg-if 1.0.0", "dyn-clone", "revm-interpreter 15.2.0", "revm-precompile 16.2.0", @@ -7815,7 +7910,7 @@ dependencies = [ "aurora-engine-modexp", "blst", "c-kzg", - "cfg-if", + "cfg-if 1.0.0", "k256", "once_cell", "revm-primitives 15.2.0", @@ -7836,7 +7931,7 @@ dependencies = [ "bitflags 2.9.0", "bitvec", "c-kzg", - "cfg-if", + "cfg-if 1.0.0", "derive_more 0.99.19", "dyn-clone", "enumn", @@ -7859,7 +7954,7 @@ dependencies = [ "bitflags 2.9.0", "bitvec", "c-kzg", - "cfg-if", + "cfg-if 1.0.0", "dyn-clone", "enumn", "hex", @@ -7896,7 +7991,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", - "cfg-if", + "cfg-if 1.0.0", "getrandom 0.2.15", "libc", "untrusted", @@ -7986,7 +8081,7 @@ checksum = "92c9d734d29c56ca0f00f492a544a6a1a2917c866c0108802dca8cd258a4de5a" dependencies = [ "anyhow", "bytemuck", - "cfg-if", + "cfg-if 1.0.0", "keccak", "paste", "rayon", @@ -8024,7 +8119,7 @@ checksum = "fb53805a927d0e0848b6d9cb0371ad5b582e61019190be86d41cca24cf8555b3" dependencies = [ "anyhow", "bytemuck", - "cfg-if", + "cfg-if 1.0.0", "cust", "downloader", "hex", @@ -8065,8 +8160,8 @@ dependencies = [ "auto_ops", "bytemuck", "byteorder", - "cfg-if", - "crossbeam", + "cfg-if 1.0.0", + "crossbeam 0.8.4", "crypto-bigint", "cust", "derive_more 1.0.0", @@ -8124,7 +8219,7 @@ dependencies = [ "alloy-sol-types", "anyhow", "bytemuck_derive", - "cfg-if", + "cfg-if 1.0.0", "risc0-zkvm", "thiserror 
2.0.12", "tracing", @@ -8177,7 +8272,7 @@ dependencies = [ "blake2", "borsh", "bytemuck", - "cfg-if", + "cfg-if 1.0.0", "cust", "digest 0.10.7", "ff 0.13.1", @@ -8249,7 +8344,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8187245809fe15d389dcdc1ca09ed5f5a1227b15e2fd48860c808aaf02e6996f" dependencies = [ "bytemuck", - "cfg-if", + "cfg-if 1.0.0", "getrandom 0.2.15", "libm", "stability", @@ -8503,7 +8598,7 @@ version = "2.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "346a3b32eba2640d17a9cb5927056b08f3de90f65b72fe09402c2ad07d684d0b" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "derive_more 1.0.0", "parity-scale-codec", "scale-info-derive", @@ -8792,7 +8887,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "cpufeatures", "digest 0.10.7", ] @@ -8804,7 +8899,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ "block-buffer 0.9.0", - "cfg-if", + "cfg-if 1.0.0", "cpufeatures", "digest 0.9.0", "opaque-debug", @@ -8816,7 +8911,7 @@ version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "cpufeatures", "digest 0.10.7", ] @@ -8838,7 +8933,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" dependencies = [ "cc", - "cfg-if", + "cfg-if 1.0.0", ] [[package]] @@ -9035,7 +9130,7 @@ dependencies = [ "bincode", "cbindgen", "cc", - "cfg-if", + "cfg-if 1.0.0", "elliptic-curve", "generic-array 1.1.0", "glob", @@ -9105,7 +9200,7 @@ version = "4.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dd52f719b0d494fb9983f32dbedebf7b3793e60337c1b4e25a2d727c72b2f0d" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "dashu", "elliptic-curve", "generic-array 1.1.0", @@ -9271,7 +9366,7 @@ dependencies = [ "backtrace", "cbindgen", "cc", - "cfg-if", + "cfg-if 1.0.0", "ff 0.13.1", "glob", "hashbrown 0.14.5", @@ -9325,7 +9420,7 @@ dependencies = [ "bincode", "bindgen 0.70.1", "cc", - "cfg-if", + "cfg-if 1.0.0", "hex", "num-bigint 0.4.6", "p3-baby-bear 0.2.2-succinct", @@ -9355,7 +9450,7 @@ dependencies = [ "async-trait", "backoff", "bincode", - "cfg-if", + "cfg-if 1.0.0", "dirs 5.0.1", "futures", "hashbrown 0.14.5", @@ -9428,7 +9523,7 @@ version = "0.8.0-sp1-4.0.0-v2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e0800e0491c38cc686233518fce535d01ba0a0707781766fec38aee9c1b33890" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "ff 0.13.1", "group 0.13.0", "pairing 0.23.0", @@ -9440,7 +9535,7 @@ dependencies = [ [[package]] name = "spawned-concurrency" version = "0.1.0" -source = "git+https://github.com/lambdaclass/spawned.git?tag=v0.1.0-alpha#abd5476b7cc0feeafd96ca79c6844e87702c7f83" +source = "git+https://github.com/lambdaclass/spawned.git?rev=ded9a1f1d41b82020481913ad821e8cfae2b9f19#ded9a1f1d41b82020481913ad821e8cfae2b9f19" dependencies = [ "futures", "spawned-rt", @@ -9450,8 +9545,9 @@ dependencies = [ [[package]] name = "spawned-rt" version = "0.1.0" -source = "git+https://github.com/lambdaclass/spawned.git?tag=v0.1.0-alpha#abd5476b7cc0feeafd96ca79c6844e87702c7f83" +source = 
"git+https://github.com/lambdaclass/spawned.git?rev=ded9a1f1d41b82020481913ad821e8cfae2b9f19#ded9a1f1d41b82020481913ad821e8cfae2b9f19" dependencies = [ + "crossbeam 0.7.3", "tokio", "tracing", "tracing-subscriber 0.3.19", @@ -9669,7 +9765,7 @@ version = "0.30.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0a5b4ddaee55fb2bea2bf0e5000747e5f5c0de765e5a5ff87f4cd106439f4bb3" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "core-foundation-sys", "libc", "ntapi", @@ -9835,7 +9931,7 @@ version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "once_cell", ] @@ -9933,7 +10029,7 @@ checksum = "a41f915e075a8a98ad64a5f7be6b7cc1710fc835c5f07e4a3efcaeb013291c00" dependencies = [ "aho-corasick 0.7.20", "clap 2.34.0", - "crossbeam-channel", + "crossbeam-channel 0.5.15", "dashmap 4.0.2", "dirs 3.0.2", "encoding_rs_io", @@ -10216,7 +10312,7 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" dependencies = [ - "crossbeam-channel", + "crossbeam-channel 0.5.15", "thiserror 1.0.69", "time", "tracing-subscriber 0.3.19", @@ -10316,7 +10412,7 @@ version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6bca43faba247bc76eb1d6c1b8b561e4a1c5bdd427cc3d7a007faabea75c683a" dependencies = [ - "crossbeam-queue", + "crossbeam-queue 0.3.12", "tokio", ] @@ -10348,7 +10444,7 @@ version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "static_assertions", ] @@ -10589,7 +10685,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2990d9ea5967266ea0ccf413a4aa5c42a93dbcfda9cb49a97de6931726b12566" dependencies = [ "anyhow", - "cfg-if", + "cfg-if 1.0.0", "git2", "rustversion", "time", @@ -10680,7 +10776,7 @@ version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "once_cell", "rustversion", "wasm-bindgen-macro", @@ -10706,7 +10802,7 @@ version = "0.4.50" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "js-sys", "once_cell", "wasm-bindgen", @@ -11367,7 +11463,7 @@ checksum = "1dcb24d0152526ae49b9b96c1dcf71850ca1e0b882e4e28ed898a93c41334744" dependencies = [ "arbitrary", "crc32fast", - "crossbeam-utils", + "crossbeam-utils 0.8.21", "flate2", "indexmap 2.9.0", "memchr", @@ -11386,7 +11482,7 @@ dependencies = [ "blake2", "bls12_381 0.7.1", "byteorder", - "cfg-if", + "cfg-if 1.0.0", "group 0.12.1", "group 0.13.0", "halo2", diff --git a/Cargo.toml b/Cargo.toml index 33b2704243..eb9fb395ee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -88,3 +88,5 @@ libsecp256k1 = "0.7.2" clap = { version = "4.3", features = ["derive", "env"] } clap_complete = "4.5.17" eyre = "0.6.12" +spawned-concurrency = {git = "https://github.com/lambdaclass/spawned.git", rev = "ded9a1f1d41b82020481913ad821e8cfae2b9f19"} +spawned-rt = {git = "https://github.com/lambdaclass/spawned.git", rev = "ded9a1f1d41b82020481913ad821e8cfae2b9f19"} diff --git a/crates/l2/Cargo.toml 
b/crates/l2/Cargo.toml index 8db93c0ea0..a0ee1f01f1 100644 --- a/crates/l2/Cargo.toml +++ b/crates/l2/Cargo.toml @@ -33,10 +33,10 @@ keccak-hash.workspace = true envy = "0.4.2" rand.workspace = true thiserror.workspace = true +spawned-rt.workspace = true +spawned-concurrency.workspace = true directories = "5.0.1" bincode = "1.3.3" -spawned-concurrency = {git = "https://github.com/lambdaclass/spawned.git", tag = "v0.1.0-alpha"} -spawned-rt = {git = "https://github.com/lambdaclass/spawned.git", tag = "v0.1.0-alpha"} lazy_static.workspace = true zkvm_interface = { path = "./prover/zkvm/interface/" } diff --git a/crates/networking/p2p/Cargo.toml b/crates/networking/p2p/Cargo.toml index 3f8f8e75cc..8d5bc7937e 100644 --- a/crates/networking/p2p/Cargo.toml +++ b/crates/networking/p2p/Cargo.toml @@ -21,6 +21,8 @@ thiserror.workspace = true lazy_static.workspace = true snap.workspace = true serde.workspace = true +spawned-rt.workspace = true +spawned-concurrency.workspace = true tokio-stream = "0.1.17" futures = "0.3.31" diff --git a/crates/networking/p2p/discv4/server.rs b/crates/networking/p2p/discv4/server.rs index 695f8cab2d..588dccf7e3 100644 --- a/crates/networking/p2p/discv4/server.rs +++ b/crates/networking/p2p/discv4/server.rs @@ -10,8 +10,8 @@ use super::{ }; use crate::{ kademlia::{KademliaTable, MAX_NODES_PER_BUCKET}, - network::{handle_peer_as_initiator, P2PContext}, - rlpx::{connection::MAX_PEERS_TCP_CONNECTIONS, utils::node_id}, + network::P2PContext, + rlpx::{connection::server::RLPxConnection, utils::node_id}, types::{Endpoint, Node}, }; use ethrex_common::H256; @@ -27,6 +27,7 @@ use tracing::{debug, error}; const MAX_DISC_PACKET_SIZE: usize = 1280; const PROOF_EXPIRATION_IN_HS: u64 = 12; +pub const MAX_PEERS_TCP_CONNECTIONS: usize = 100; // These interval times are arbitrary numbers, maybe we should read them from a cfg or a cli param const REVALIDATION_INTERVAL_IN_SECONDS: u64 = 30; @@ -224,10 +225,8 @@ impl Discv4Server { return Ok(()); } - let ctx = self.ctx.clone(); - self.ctx - .tracker - .spawn(async move { handle_peer_as_initiator(ctx, peer.node).await }); + RLPxConnection::spawn_as_initiator(self.ctx.clone(), &peer.node).await; + Ok(()) } Message::FindNode(msg) => { @@ -518,11 +517,8 @@ impl Discv4Server { if active_connections >= MAX_PEERS_TCP_CONNECTIONS { return Ok(()); } - let ctx = self.ctx.clone(); - - self.ctx - .tracker - .spawn(async move { handle_peer_as_initiator(ctx, peer.node).await }); + + RLPxConnection::spawn_as_initiator(self.ctx.clone(), &peer.node).await; Ok(()) } diff --git a/crates/networking/p2p/network.rs b/crates/networking/p2p/network.rs index b5808b73d4..0276f0408e 100644 --- a/crates/networking/p2p/network.rs +++ b/crates/networking/p2p/network.rs @@ -1,13 +1,9 @@ use crate::kademlia::{self, KademliaTable}; +use crate::rlpx::connection::server::{RLPxConnBroadcastSender, RLPxConnection}; use crate::rlpx::p2p::SUPPORTED_SNAP_CAPABILITIES; -use crate::rlpx::{ - connection::RLPxConnBroadcastSender, handshake, message::Message as RLPxMessage, -}; +use crate::rlpx::message::Message as RLPxMessage; use crate::types::{Node, NodeRecord}; -use crate::{ - discv4::server::{DiscoveryError, Discv4Server}, - rlpx::utils::log_peer_debug, -}; +use crate::discv4::server::{DiscoveryError, Discv4Server}; use ethrex_blockchain::Blockchain; use ethrex_common::{H256, H512}; use ethrex_storage::Store; @@ -17,11 +13,11 @@ use k256::{ }; use std::{io, net::SocketAddr, sync::Arc}; use tokio::{ - net::{TcpListener, TcpSocket, TcpStream}, + net::{TcpListener, TcpSocket}, 
sync::Mutex, }; use tokio_util::task::TaskTracker; -use tracing::{debug, error, info}; +use tracing::{error, info}; // Totally arbitrary limit on how // many messages the connections can queue, @@ -133,9 +129,7 @@ pub(crate) async fn serve_p2p_requests(context: P2PContext) { } }; - context - .tracker - .spawn(handle_peer_as_receiver(context.clone(), peer_addr, stream)); + RLPxConnection::spawn_as_receiver(context.clone(), peer_addr, stream).await; } } @@ -145,40 +139,6 @@ fn listener(tcp_addr: SocketAddr) -> Result { tcp_socket.listen(50) } -async fn handle_peer_as_receiver(context: P2PContext, peer_addr: SocketAddr, stream: TcpStream) { - let table = context.table.clone(); - match handshake::as_receiver(context, peer_addr, stream).await { - Ok(mut conn) => conn.start(table, true).await, - Err(e) => { - debug!("Error creating tcp connection with peer at {peer_addr}: {e}") - } - } -} - -pub async fn handle_peer_as_initiator(context: P2PContext, node: Node) { - let addr = SocketAddr::new(node.ip, node.tcp_port); - let stream = match tcp_stream(addr).await { - Ok(result) => result, - Err(e) => { - log_peer_debug(&node, &format!("Error creating tcp connection {e}")); - context.table.lock().await.replace_peer(node.node_id()); - return; - } - }; - let table = context.table.clone(); - match handshake::as_initiator(context, node.clone(), stream).await { - Ok(mut conn) => conn.start(table, false).await, - Err(e) => { - log_peer_debug(&node, &format!("Error creating tcp connection {e}")); - table.lock().await.replace_peer(node.node_id()); - } - }; -} - -async fn tcp_stream(addr: SocketAddr) -> Result { - TcpSocket::new_v4()?.connect(addr).await -} - pub fn public_key_from_signing_key(signer: &SigningKey) -> H512 { let public_key = PublicKey::from(signer.verifying_key()); let encoded = public_key.to_encoded_point(false); diff --git a/crates/networking/p2p/rlpx.rs b/crates/networking/p2p/rlpx.rs index 12082b65c7..1880b39487 100644 --- a/crates/networking/p2p/rlpx.rs +++ b/crates/networking/p2p/rlpx.rs @@ -1,8 +1,6 @@ pub mod connection; pub mod error; pub mod eth; -pub mod frame; -pub mod handshake; pub mod message; pub mod p2p; pub mod snap; diff --git a/crates/networking/p2p/rlpx/connection.rs b/crates/networking/p2p/rlpx/connection.rs deleted file mode 100644 index 720b6684db..0000000000 --- a/crates/networking/p2p/rlpx/connection.rs +++ /dev/null @@ -1,652 +0,0 @@ -use crate::{ - kademlia::PeerChannels, - rlpx::{ - error::RLPxError, - eth::{ - backend, - blocks::{BlockBodies, BlockHeaders}, - receipts::{GetReceipts, Receipts}, - transactions::{GetPooledTransactions, Transactions}, - }, - frame::RLPxCodec, - message::Message, - p2p::{ - self, Capability, DisconnectMessage, PingMessage, PongMessage, - SUPPORTED_ETH_CAPABILITIES, SUPPORTED_P2P_CAPABILITIES, SUPPORTED_SNAP_CAPABILITIES, - }, - utils::{log_peer_debug, log_peer_error}, - }, - snap::{ - process_account_range_request, process_byte_codes_request, process_storage_ranges_request, - process_trie_nodes_request, - }, - types::Node, -}; -use ethrex_blockchain::Blockchain; -use ethrex_common::{ - types::{MempoolTransaction, Transaction}, - H256, H512, -}; -use ethrex_storage::Store; -use futures::SinkExt; -use k256::{ecdsa::SigningKey, PublicKey, SecretKey}; -use rand::random; -use std::{collections::HashSet, sync::Arc}; -use tokio::{ - io::{AsyncRead, AsyncWrite}, - sync::{ - broadcast::{self, error::RecvError}, - mpsc, Mutex, - }, - task, - time::{sleep, Instant}, -}; -use tokio_stream::StreamExt; -use tokio_util::codec::Framed; -use 
tracing::debug; - -use super::{ - eth::transactions::NewPooledTransactionHashes, p2p::DisconnectReason, utils::log_peer_warn, -}; - -const PERIODIC_PING_INTERVAL: std::time::Duration = std::time::Duration::from_secs(10); -const PERIODIC_TX_BROADCAST_INTERVAL: std::time::Duration = std::time::Duration::from_millis(500); -const PERIODIC_TASKS_CHECK_INTERVAL: std::time::Duration = std::time::Duration::from_millis(500); -pub const MAX_PEERS_TCP_CONNECTIONS: usize = 100; - -pub(crate) type Aes256Ctr64BE = ctr::Ctr64BE; - -pub(crate) type RLPxConnBroadcastSender = broadcast::Sender<(tokio::task::Id, Arc)>; - -pub(crate) struct RemoteState { - pub(crate) public_key: H512, - pub(crate) nonce: H256, - pub(crate) ephemeral_key: PublicKey, - pub(crate) init_message: Vec, -} - -pub(crate) struct LocalState { - pub(crate) nonce: H256, - pub(crate) ephemeral_key: SecretKey, - pub(crate) init_message: Vec, -} - -/// Fully working RLPx connection. -pub(crate) struct RLPxConnection { - signer: SigningKey, - node: Node, - framed: Framed, - storage: Store, - blockchain: Arc, - capabilities: Vec, - negotiated_eth_capability: Option, - negotiated_snap_capability: Option, - next_periodic_ping: Instant, - next_tx_broadcast: Instant, - broadcasted_txs: HashSet, - client_version: String, - /// Send end of the channel used to broadcast messages - /// to other connected peers, is ok to have it here, - /// since internally it's an Arc. - /// The ID is to ignore the message sent from the same task. - /// This is used both to send messages and to received broadcasted - /// messages from other connections (sent from other peers). - /// The receive end is instantiated after the handshake is completed - /// under `handle_peer`. - connection_broadcast_send: RLPxConnBroadcastSender, -} - -impl RLPxConnection { - #[allow(clippy::too_many_arguments)] - pub fn new( - signer: SigningKey, - node: Node, - stream: S, - codec: RLPxCodec, - storage: Store, - blockchain: Arc, - client_version: String, - connection_broadcast: RLPxConnBroadcastSender, - ) -> Self { - Self { - signer, - node, - framed: Framed::new(stream, codec), - storage, - blockchain, - capabilities: vec![], - negotiated_eth_capability: None, - negotiated_snap_capability: None, - next_periodic_ping: Instant::now() + PERIODIC_TASKS_CHECK_INTERVAL, - next_tx_broadcast: Instant::now() + PERIODIC_TX_BROADCAST_INTERVAL, - broadcasted_txs: HashSet::new(), - client_version, - connection_broadcast_send: connection_broadcast, - } - } - - async fn post_handshake_checks( - &self, - table: Arc>, - ) -> Result<(), DisconnectReason> { - // Check if connected peers exceed the limit - let peer_count = { - let table_lock = table.lock().await; - table_lock.count_connected_peers() - }; - - if peer_count >= MAX_PEERS_TCP_CONNECTIONS { - return Err(DisconnectReason::TooManyPeers); - } - - Ok(()) - } - - /// Handshake already performed, now it starts a peer connection. 
- /// It runs in it's own task and blocks until the connection is dropped - pub async fn start( - &mut self, - table: Arc>, - inbound: bool, - ) { - log_peer_debug(&self.node, "Starting RLPx connection"); - - if let Err(reason) = self.post_handshake_checks(table.clone()).await { - self.connection_failed( - "Post handshake validations failed", - RLPxError::DisconnectSent(reason), - table, - ) - .await; - return; - } - - if let Err(e) = self.exchange_hello_messages().await { - self.connection_failed("Hello messages exchange failed", e, table) - .await; - } else { - // Handshake OK: handle connection - // Create channels to communicate directly to the peer - let (peer_channels, sender, receiver) = PeerChannels::create(); - - // NOTE: if the peer came from the discovery server it will already be inserted in the table - // but that might not always be the case, so we try to add it to the table - // Note: we don't ping the node we let the validation service do its job - { - let mut table_lock = table.lock().await; - table_lock.insert_node_forced(self.node.clone()); - table_lock.init_backend_communication( - self.node.node_id(), - peer_channels, - self.capabilities.clone(), - inbound, - ); - } - if let Err(e) = self.connection_loop(sender, receiver).await { - self.connection_failed("Error during RLPx connection", e, table) - .await; - } - } - } - - async fn send_disconnect_message(&mut self, reason: Option) { - self.send(Message::Disconnect(DisconnectMessage { reason })) - .await - .unwrap_or_else(|_| { - log_peer_debug( - &self.node, - &format!("Could not send Disconnect message: ({:?}).", reason), - ); - }); - } - - async fn connection_failed( - &mut self, - error_text: &str, - error: RLPxError, - table: Arc>, - ) { - log_peer_debug(&self.node, &format!("{error_text}: ({error})")); - - // Send disconnect message only if error is different than RLPxError::DisconnectRequested - // because if it is a DisconnectRequested error it means that the peer requested the disconnection, not us. 
- if !matches!(error, RLPxError::DisconnectReceived(_)) { - self.send_disconnect_message(self.match_disconnect_reason(&error)) - .await; - } - - // Discard peer from kademlia table in some cases - match error { - // already connected, don't discard it - RLPxError::DisconnectReceived(DisconnectReason::AlreadyConnected) - | RLPxError::DisconnectSent(DisconnectReason::AlreadyConnected) => { - log_peer_debug(&self.node, "Peer already connected, don't replace it"); - } - _ => { - let remote_public_key = self.node.public_key; - log_peer_debug( - &self.node, - &format!("{error_text}: ({error}), discarding peer {remote_public_key}"), - ); - table.lock().await.replace_peer(self.node.node_id()); - } - } - - let _ = self.framed.close().await; - } - - fn match_disconnect_reason(&self, error: &RLPxError) -> Option { - match error { - RLPxError::DisconnectSent(reason) => Some(*reason), - RLPxError::DisconnectReceived(reason) => Some(*reason), - RLPxError::RLPDecodeError(_) => Some(DisconnectReason::NetworkError), - // TODO build a proper matching between error types and disconnection reasons - _ => None, - } - } - - async fn exchange_hello_messages(&mut self) -> Result<(), RLPxError> { - let supported_capabilities: Vec = [ - &SUPPORTED_ETH_CAPABILITIES[..], - &SUPPORTED_SNAP_CAPABILITIES[..], - &SUPPORTED_P2P_CAPABILITIES[..], - ] - .concat(); - let hello_msg = Message::Hello(p2p::HelloMessage::new( - supported_capabilities, - PublicKey::from(self.signer.verifying_key()), - self.client_version.clone(), - )); - - self.send(hello_msg).await?; - - // Receive Hello message - let msg = match self.receive().await { - Some(msg) => msg?, - None => return Err(RLPxError::Disconnected()), - }; - - match msg { - Message::Hello(hello_message) => { - let mut negotiated_eth_version = 0; - let mut negotiated_snap_version = 0; - - log_peer_debug( - &self.node, - &format!( - "Hello message capabilities {:?}", - hello_message.capabilities - ), - ); - - // Check if we have any capability in common and store the highest version - for cap in &hello_message.capabilities { - match cap.protocol { - "eth" => { - if SUPPORTED_ETH_CAPABILITIES.contains(cap) - && cap.version > negotiated_eth_version - { - negotiated_eth_version = cap.version; - } - } - "snap" => { - if SUPPORTED_SNAP_CAPABILITIES.contains(cap) - && cap.version > negotiated_snap_version - { - negotiated_snap_version = cap.version; - } - } - _ => {} - } - } - - self.capabilities = hello_message.capabilities; - - if negotiated_eth_version == 0 { - return Err(RLPxError::NoMatchingCapabilities()); - } - debug!("Negotatied eth version: eth/{}", negotiated_eth_version); - self.negotiated_eth_capability = Some(Capability::eth(negotiated_eth_version)); - - if negotiated_snap_version != 0 { - debug!("Negotatied snap version: snap/{}", negotiated_snap_version); - self.negotiated_snap_capability = - Some(Capability::snap(negotiated_snap_version)); - } - - self.node.version = Some(hello_message.client_id); - - Ok(()) - } - Message::Disconnect(disconnect) => { - Err(RLPxError::DisconnectReceived(disconnect.reason())) - } - _ => { - // Fail if it is not a hello message - Err(RLPxError::BadRequest("Expected Hello message".to_string())) - } - } - } - - async fn connection_loop( - &mut self, - sender: mpsc::Sender, - mut receiver: mpsc::Receiver, - ) -> Result<(), RLPxError> { - self.init_peer_conn().await?; - log_peer_debug(&self.node, "Started peer main loop"); - - // Subscribe this connection to the broadcasting channel. 
- let mut broadcaster_receive = if self.negotiated_eth_capability.is_some() { - Some(self.connection_broadcast_send.subscribe()) - } else { - None - }; - - // Send transactions transaction hashes from mempool at connection start - self.send_new_pooled_tx_hashes().await?; - // Start listening for messages, - loop { - tokio::select! { - // Expect a message from the remote peer - Some(message) = self.receive() => { - match message { - Ok(message) => { - log_peer_debug(&self.node, &format!("Received message {}", message)); - self.handle_message(message, sender.clone()).await?; - }, - Err(e) => { - log_peer_debug(&self.node, &format!("Received RLPX Error in msg {}", e)); - return Err(e); - } - } - } - // Expect a message from the backend - Some(message) = receiver.recv() => { - log_peer_debug(&self.node, &format!("Sending message {}", message)); - self.send(message).await?; - } - // This is not ideal, but using the receiver without - // this function call, causes the loop to take ownwership - // of the variable and the compiler will complain about it, - // with this function, we avoid that. - // If the broadcaster is Some (i.e. we're connected to a peer that supports an eth protocol), - // we'll receive broadcasted messages from another connections through a channel, otherwise - // the function below will yield immediately but the select will not match and - // ignore the returned value. - Some(broadcasted_msg) = Self::maybe_wait_for_broadcaster(&mut broadcaster_receive) => { - self.handle_broadcast(broadcasted_msg?).await? - } - // Allow an interruption to check periodic tasks - _ = sleep(PERIODIC_TASKS_CHECK_INTERVAL) => (), // noop - } - self.check_periodic_tasks().await?; - } - } - - async fn maybe_wait_for_broadcaster( - receiver: &mut Option)>>, - ) -> Option), RecvError>> { - match receiver { - None => None, - Some(rec) => Some(rec.recv().await), - } - } - - async fn check_periodic_tasks(&mut self) -> Result<(), RLPxError> { - if Instant::now() >= self.next_periodic_ping { - self.send(Message::Ping(PingMessage {})).await?; - log_peer_debug(&self.node, "Ping sent"); - self.next_periodic_ping = Instant::now() + PERIODIC_PING_INTERVAL; - }; - if Instant::now() >= self.next_tx_broadcast { - self.send_new_pooled_tx_hashes().await?; - self.next_tx_broadcast = Instant::now() + PERIODIC_TX_BROADCAST_INTERVAL; - } - Ok(()) - } - - async fn send_new_pooled_tx_hashes(&mut self) -> Result<(), RLPxError> { - if SUPPORTED_ETH_CAPABILITIES - .iter() - .any(|cap| self.capabilities.contains(cap)) - { - let filter = - |tx: &Transaction| -> bool { !self.broadcasted_txs.contains(&tx.compute_hash()) }; - let txs: Vec = self - .blockchain - .mempool - .filter_transactions_with_filter_fn(&filter)? 
- .into_values() - .flatten() - .collect(); - if !txs.is_empty() { - let tx_count = txs.len(); - for tx in txs { - self.send(Message::NewPooledTransactionHashes( - NewPooledTransactionHashes::new(vec![(*tx).clone()], &self.blockchain)?, - )) - .await?; - // Possible improvement: the mempool already knows the hash but the filter function does not return it - self.broadcasted_txs.insert((*tx).compute_hash()); - } - log_peer_debug( - &self.node, - &format!("Sent {} transactions to peer", tx_count), - ); - } - } - Ok(()) - } - - async fn handle_message( - &mut self, - message: Message, - sender: mpsc::Sender, - ) -> Result<(), RLPxError> { - let peer_supports_eth = self.negotiated_eth_capability.is_some(); - match message { - Message::Disconnect(msg_data) => { - log_peer_debug( - &self.node, - &format!("Received Disconnect: {}", msg_data.reason()), - ); - // TODO handle the disconnection request - return Err(RLPxError::DisconnectReceived(msg_data.reason())); - } - Message::Ping(_) => { - log_peer_debug(&self.node, "Sending pong message"); - self.send(Message::Pong(PongMessage {})).await?; - } - Message::Pong(_) => { - // We ignore received Pong messages - } - Message::Status(msg_data) => { - if let Some(eth) = &self.negotiated_eth_capability { - backend::validate_status(msg_data, &self.storage, eth.version).await? - }; - } - Message::GetAccountRange(req) => { - let response = process_account_range_request(req, self.storage.clone())?; - self.send(Message::AccountRange(response)).await? - } - // TODO(#1129) Add the transaction to the mempool once received. - Message::Transactions(txs) if peer_supports_eth => { - if self.blockchain.is_synced() { - let mut valid_txs = vec![]; - for tx in &txs.transactions { - if let Err(e) = self.blockchain.add_transaction_to_pool(tx.clone()).await { - log_peer_warn(&self.node, &format!("Error adding transaction: {}", e)); - continue; - } - valid_txs.push(tx.clone()); - } - self.broadcast_message(Message::Transactions(Transactions::new(valid_txs)))?; - } - } - Message::GetBlockHeaders(msg_data) if peer_supports_eth => { - let response = BlockHeaders { - id: msg_data.id, - block_headers: msg_data.fetch_headers(&self.storage).await, - }; - self.send(Message::BlockHeaders(response)).await?; - } - Message::GetBlockBodies(msg_data) if peer_supports_eth => { - let response = BlockBodies { - id: msg_data.id, - block_bodies: msg_data.fetch_blocks(&self.storage).await, - }; - self.send(Message::BlockBodies(response)).await?; - } - Message::GetReceipts(GetReceipts { id, block_hashes }) if peer_supports_eth => { - let mut receipts = Vec::new(); - for hash in block_hashes.iter() { - receipts.push(self.storage.get_receipts_for_block(hash)?); - } - let response = Receipts { id, receipts }; - self.send(Message::Receipts(response)).await?; - } - Message::NewPooledTransactionHashes(new_pooled_transaction_hashes) - if peer_supports_eth => - { - //TODO(#1415): evaluate keeping track of requests to avoid sending the same twice. - let hashes = - new_pooled_transaction_hashes.get_transactions_to_request(&self.blockchain)?; - - //TODO(#1416): Evaluate keeping track of the request-id. 
- let request = GetPooledTransactions::new(random(), hashes); - self.send(Message::GetPooledTransactions(request)).await?; - } - Message::GetPooledTransactions(msg) => { - let response = msg.handle(&self.blockchain)?; - self.send(Message::PooledTransactions(response)).await?; - } - Message::PooledTransactions(msg) if peer_supports_eth => { - if self.blockchain.is_synced() { - msg.handle(&self.node, &self.blockchain).await?; - } - } - Message::GetStorageRanges(req) => { - let response = process_storage_ranges_request(req, self.storage.clone())?; - self.send(Message::StorageRanges(response)).await? - } - Message::GetByteCodes(req) => { - let response = process_byte_codes_request(req, self.storage.clone())?; - self.send(Message::ByteCodes(response)).await? - } - Message::GetTrieNodes(req) => { - let response = process_trie_nodes_request(req, self.storage.clone())?; - self.send(Message::TrieNodes(response)).await? - } - // Send response messages to the backend - message @ Message::AccountRange(_) - | message @ Message::StorageRanges(_) - | message @ Message::ByteCodes(_) - | message @ Message::TrieNodes(_) - | message @ Message::BlockBodies(_) - | message @ Message::BlockHeaders(_) - | message @ Message::Receipts(_) => sender.send(message).await?, - // TODO: Add new message types and handlers as they are implemented - message => return Err(RLPxError::MessageNotHandled(format!("{message}"))), - }; - Ok(()) - } - - async fn handle_broadcast( - &mut self, - (id, broadcasted_msg): (task::Id, Arc), - ) -> Result<(), RLPxError> { - if id != tokio::task::id() { - match broadcasted_msg.as_ref() { - Message::Transactions(ref txs) => { - // TODO(#1131): Avoid cloning this vector. - let cloned = txs.transactions.clone(); - let new_msg = Message::Transactions(Transactions { - transactions: cloned, - }); - self.send(new_msg).await?; - } - msg => { - let error_message = format!("Non-supported message broadcasted: {msg}"); - log_peer_error(&self.node, &error_message); - return Err(RLPxError::BroadcastError(error_message)); - } - } - } - Ok(()) - } - - async fn init_peer_conn(&mut self) -> Result<(), RLPxError> { - // Sending eth Status if peer supports it - if let Some(eth) = self.negotiated_eth_capability.clone() { - let status = backend::get_status(&self.storage, eth.version).await?; - log_peer_debug(&self.node, "Sending status"); - self.send(Message::Status(status)).await?; - // The next immediate message in the ETH protocol is the - // status, reference here: - // https://github.com/ethereum/devp2p/blob/master/caps/eth.md#status-0x00 - let msg = match self.receive().await { - Some(msg) => msg?, - None => return Err(RLPxError::Disconnected()), - }; - match msg { - Message::Status(msg_data) => { - log_peer_debug(&self.node, "Received Status"); - backend::validate_status(msg_data, &self.storage, eth.version).await? - } - Message::Disconnect(disconnect) => { - return Err(RLPxError::HandshakeError(format!( - "Peer disconnected due to: {}", - disconnect.reason() - ))) - } - _ => { - return Err(RLPxError::HandshakeError( - "Expected a Status message".to_string(), - )) - } - } - } - - Ok(()) - } - - async fn send(&mut self, message: Message) -> Result<(), RLPxError> { - self.framed.send(message).await - } - - /// Reads from the frame until a frame is available. - /// - /// Returns `None` when the stream buffer is 0. This could indicate that the client has disconnected, - /// but we cannot safely assume an EOF, as per the Tokio documentation. 
- /// - /// If the handshake has not been established, it is reasonable to terminate the connection. - /// - /// For an established connection, [`check_periodic_task`] will detect actual disconnections - /// while sending pings and you should not assume a disconnection. - /// - /// See [`Framed::new`] for more details. - async fn receive(&mut self) -> Option> { - self.framed.next().await - } - - fn broadcast_message(&self, msg: Message) -> Result<(), RLPxError> { - match msg { - txs_msg @ Message::Transactions(_) => { - let txs = Arc::new(txs_msg); - let task_id = tokio::task::id(); - let Ok(_) = self.connection_broadcast_send.send((task_id, txs)) else { - let error_message = "Could not broadcast received transactions"; - log_peer_error(&self.node, error_message); - return Err(RLPxError::BroadcastError(error_message.to_owned())); - }; - Ok(()) - } - msg => { - let error_message = format!("Broadcasting for msg: {msg} is not supported"); - log_peer_error(&self.node, &error_message); - Err(RLPxError::BroadcastError(error_message)) - } - } - } -} diff --git a/crates/networking/p2p/rlpx/frame.rs b/crates/networking/p2p/rlpx/connection/codec.rs similarity index 98% rename from crates/networking/p2p/rlpx/frame.rs rename to crates/networking/p2p/rlpx/connection/codec.rs index 9dd17fa451..2df38b1d10 100644 --- a/crates/networking/p2p/rlpx/frame.rs +++ b/crates/networking/p2p/rlpx/connection/codec.rs @@ -1,9 +1,6 @@ -use super::{ - connection::{Aes256Ctr64BE, LocalState, RemoteState}, - error::RLPxError, - message as rlpx, - utils::ecdh_xchng, -}; +use crate::rlpx::{message as rlpx, error::RLPxError, utils::ecdh_xchng}; + +use super::handshake::{LocalState, RemoteState}; use aes::{ cipher::{BlockEncrypt as _, KeyInit as _, KeyIvInit, StreamCipher as _}, Aes256Enc, @@ -19,6 +16,8 @@ use tokio_util::codec::{Decoder, Encoder, Framed}; // Taken from https://github.com/ethereum/go-ethereum/blob/82e963e5c981e36dc4b607dd0685c64cf4aabea8/p2p/rlpx/rlpx.go#L152 const MAX_MESSAGE_SIZE: usize = 0xFFFFFF; +type Aes256Ctr64BE = ctr::Ctr64BE; + pub(crate) struct RLPxCodec { pub(crate) mac_key: H256, pub(crate) ingress_mac: Keccak256, diff --git a/crates/networking/p2p/rlpx/handshake.rs b/crates/networking/p2p/rlpx/connection/handshake.rs similarity index 93% rename from crates/networking/p2p/rlpx/handshake.rs rename to crates/networking/p2p/rlpx/connection/handshake.rs index 46d1387237..e4466693b2 100644 --- a/crates/networking/p2p/rlpx/handshake.rs +++ b/crates/networking/p2p/rlpx/connection/handshake.rs @@ -1,11 +1,7 @@ -use std::net::SocketAddr; - use crate::{ network::P2PContext, rlpx::{ - connection::{LocalState, RLPxConnection, RemoteState}, error::RLPxError, - frame::RLPxCodec, utils::{ compress_pubkey, decompress_pubkey, ecdh_xchng, kdf, log_peer_debug, sha256, sha256_hmac, @@ -29,52 +25,52 @@ use k256::{ use rand::Rng; use sha3::{Digest, Keccak256}; use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; +use tokio_util::codec::Framed; +use tracing::info; + +use super::codec::RLPxCodec; type Aes128Ctr64BE = ctr::Ctr64BE; // https://github.com/ethereum/go-ethereum/blob/master/p2p/peer.go#L44 pub const P2P_MAX_MESSAGE_SIZE: usize = 2048; -pub(crate) async fn as_receiver( - context: P2PContext, - peer_addr: SocketAddr, - mut stream: S, -) -> Result, RLPxError> +pub(crate) struct RemoteState { + pub(crate) public_key: H512, + pub(crate) nonce: H256, + pub(crate) ephemeral_key: PublicKey, + pub(crate) init_message: Vec, +} + +pub(crate) struct LocalState { + pub(crate) nonce: H256, + pub(crate) 
ephemeral_key: SecretKey, + pub(crate) init_message: Vec, +} + +pub(crate) async fn new_as_receiver(context: P2PContext, mut stream: S) -> Result<(Framed, H512), RLPxError> where S: AsyncRead + AsyncWrite + std::marker::Unpin, { + info!("Starting handshake as receiver!"); let remote_state = receive_auth(&context.signer, &mut stream).await?; let local_state = send_ack(remote_state.public_key, &mut stream).await?; let hashed_nonces: [u8; 32] = Keccak256::digest([local_state.nonce.0, remote_state.nonce.0].concat()).into(); - let node = Node::new( - peer_addr.ip(), - peer_addr.port(), - peer_addr.port(), - remote_state.public_key, - ); let codec = RLPxCodec::new(&local_state, &remote_state, hashed_nonces); - log_peer_debug(&node, "Completed handshake as receiver!"); - Ok(RLPxConnection::new( - context.signer, - node, - stream, - codec, - context.storage, - context.blockchain, - context.client_version, - context.broadcast, - )) + Ok((Framed::new(stream, codec), remote_state.public_key)) } -pub(crate) async fn as_initiator( +pub(crate) async fn new_as_initiator( context: P2PContext, - node: Node, + node: &Node, mut stream: S, -) -> Result, RLPxError> +) -> Result, RLPxError> where S: AsyncRead + AsyncWrite + std::marker::Unpin, { + + info!("Starting handshake as initiator!"); let local_state = send_auth(&context.signer, node.public_key, &mut stream).await?; let remote_state = receive_ack(&context.signer, node.public_key, &mut stream).await?; // Local node is initator @@ -82,17 +78,8 @@ where let hashed_nonces: [u8; 32] = Keccak256::digest([remote_state.nonce.0, local_state.nonce.0].concat()).into(); let codec = RLPxCodec::new(&local_state, &remote_state, hashed_nonces); - log_peer_debug(&node, "Completed handshake as initiator!"); - Ok(RLPxConnection::new( - context.signer, - node, - stream, - codec, - context.storage, - context.blockchain, - context.client_version, - context.broadcast, - )) + log_peer_debug(node, "Completed handshake as initiator!"); + Ok(Framed::new(stream, codec)) } async fn send_auth( @@ -519,7 +506,7 @@ mod tests { use hex_literal::hex; use k256::SecretKey; - use crate::rlpx::{handshake::decode_ack_message, utils::decompress_pubkey}; + use crate::rlpx::{connection::handshake::decode_ack_message, utils::decompress_pubkey}; #[test] fn test_ack_decoding() { diff --git a/crates/networking/p2p/rlpx/connection/mod.rs b/crates/networking/p2p/rlpx/connection/mod.rs new file mode 100644 index 0000000000..c036883080 --- /dev/null +++ b/crates/networking/p2p/rlpx/connection/mod.rs @@ -0,0 +1,3 @@ +pub mod server; +mod handshake; +mod codec; \ No newline at end of file diff --git a/crates/networking/p2p/rlpx/connection/server.rs b/crates/networking/p2p/rlpx/connection/server.rs new file mode 100644 index 0000000000..4599cd3454 --- /dev/null +++ b/crates/networking/p2p/rlpx/connection/server.rs @@ -0,0 +1,712 @@ +use std::{collections::HashSet, net::SocketAddr, sync::Arc, time::Instant}; + +use ethrex_blockchain::Blockchain; +use ethrex_common::{types::{MempoolTransaction, Transaction}, H256}; +use ethrex_storage::Store; +use k256::{ecdsa::SigningKey, PublicKey}; +use rand::random; +use spawned_concurrency::tasks::{CallResponse, CastResponse, GenServer, GenServerHandle, GenServerInMsg}; +use spawned_rt::tasks::mpsc::Sender; +use tokio::{net::{TcpSocket, TcpStream}, sync::{broadcast, Mutex}}; +use tokio_util::codec::Framed; +use tokio_stream::StreamExt; +use futures::SinkExt; +use tracing::{debug, error}; + +use crate::{discv4::server::MAX_PEERS_TCP_CONNECTIONS, kademlia::{KademliaTable, 
PeerChannels}, network::P2PContext, rlpx::{error::RLPxError, eth::{backend, blocks::{BlockBodies, BlockHeaders}, receipts::{GetReceipts, Receipts}, transactions::{GetPooledTransactions, NewPooledTransactionHashes, Transactions}}, message::Message, p2p::{self, Capability, DisconnectMessage, DisconnectReason, PongMessage, SUPPORTED_ETH_CAPABILITIES, SUPPORTED_P2P_CAPABILITIES, SUPPORTED_SNAP_CAPABILITIES}, utils::{log_peer_debug, log_peer_error, log_peer_warn}}, snap::{process_account_range_request, process_byte_codes_request, process_storage_ranges_request, process_trie_nodes_request}, types::Node}; + +use super::{codec::RLPxCodec, handshake}; + +pub(crate) type RLPxConnBroadcastSender = broadcast::Sender<(tokio::task::Id, Arc)>; + +const PERIODIC_TX_BROADCAST_INTERVAL: std::time::Duration = std::time::Duration::from_millis(500); +const PERIODIC_TASKS_CHECK_INTERVAL: std::time::Duration = std::time::Duration::from_millis(500); + +type MsgResult = Result; +type RLPxConnectionHandle = GenServerHandle; + +#[derive(Clone)] +pub struct RLPxConnectionState { + pub(crate) signer: SigningKey, + pub(crate) node: Node, + pub(crate) framed: Arc>>, + pub(crate) storage: Store, + pub(crate) blockchain: Arc, + pub(crate) capabilities: Vec, + pub(crate) negotiated_eth_capability: Option, + pub(crate) negotiated_snap_capability: Option, + pub(crate) next_periodic_ping: Instant, + pub(crate) next_tx_broadcast: Instant, + pub(crate) broadcasted_txs: HashSet, + pub(crate) client_version: String, + //// Send end of the channel used to broadcast messages + //// to other connected peers, is ok to have it here, + //// since internally it's an Arc. + //// The ID is to ignore the message sent from the same task. + //// This is used both to send messages and to received broadcasted + //// messages from other connections (sent from other peers). + //// The receive end is instantiated after the handshake is completed + //// under `handle_peer`. 
+ connection_broadcast_send: RLPxConnBroadcastSender, + table: Arc>, + inbound: bool, +} + +impl RLPxConnectionState { + pub async fn new_as_receiver(context: P2PContext, peer_addr: SocketAddr, stream: TcpStream) -> Result { + let (framed, remote_key) = handshake::new_as_receiver(context.clone(), stream).await?; + let node = Node::new( + peer_addr.ip(), + peer_addr.port(), + peer_addr.port(), + remote_key, + ); + Ok(Self{ + signer: context.signer, + node, + framed: Arc::new(Mutex::new(framed)), + storage: context.storage, + blockchain: context.blockchain, + capabilities: vec![], + negotiated_eth_capability: None, + negotiated_snap_capability: None, + next_periodic_ping: Instant::now() + PERIODIC_TASKS_CHECK_INTERVAL, + next_tx_broadcast: Instant::now() + PERIODIC_TX_BROADCAST_INTERVAL, + broadcasted_txs: HashSet::new(), + client_version: context.client_version, + connection_broadcast_send: context.broadcast, + table: context.table, + inbound: true, + }) + } + + pub async fn new_as_initiator(context: P2PContext, node: &Node, stream: TcpStream) -> Result { + let framed = handshake::new_as_initiator(context.clone(), node, stream).await?; + Ok(Self{ + signer: context.signer, + node: node.clone(), + framed: Arc::new(Mutex::new(framed)), + storage: context.storage, + blockchain: context.blockchain, + capabilities: vec![], + negotiated_eth_capability: None, + negotiated_snap_capability: None, + next_periodic_ping: Instant::now() + PERIODIC_TASKS_CHECK_INTERVAL, + next_tx_broadcast: Instant::now() + PERIODIC_TX_BROADCAST_INTERVAL, + broadcasted_txs: HashSet::new(), + client_version: context.client_version, + connection_broadcast_send: context.broadcast, + table: context.table, + inbound: false, + }) + } +} + +#[derive(Clone)] +pub enum InMessage { + PeerMessage(Message), + BroadcastMessage, + BackendMessage, + PeriodicCheck, +} + +#[allow(dead_code)] +#[derive(Clone, PartialEq)] +pub enum OutMessage { + Done, + Error, +} + +pub struct RLPxConnection {} + +impl RLPxConnection { + + pub async fn spawn_as_receiver(context: P2PContext, peer_addr: SocketAddr, stream: TcpStream) { + match RLPxConnectionState::new_as_receiver(context, peer_addr, stream).await { + Ok(mut state) => { + init(&mut state).await; + let node = state.node.clone(); + let framed = state.framed.clone(); + let conn = RLPxConnection::start(state); + // Send Init message to perform post handshake and initial checks. + spawn_listener(conn, node, framed); + } + Err(error) => error!("Error starting RLPxConnection: {}", error), + }; + } + + pub async fn spawn_as_initiator(context: P2PContext, node: &Node) { + let addr = SocketAddr::new(node.ip, node.tcp_port); + let stream = match tcp_stream(addr).await { + Ok(result) => result, + Err(error) => { + log_peer_debug(node, &format!("Error creating tcp connection {error}")); + context.table.lock().await.replace_peer(node.node_id()); + return; + } + }; + let table = context.table.clone(); + match RLPxConnectionState::new_as_initiator(context, node, stream).await { + Ok(mut state) => { + init(&mut state).await; + let node = state.node.clone(); + let framed = state.framed.clone(); + let conn = RLPxConnection::start(state); + // Send Init message to perform post handshake and initial checks. 
+ spawn_listener(conn, node, framed); + } + Err(error) => { + log_peer_debug(node, &format!("Error starting RLPxConnection: {error}")); + table.lock().await.replace_peer(node.node_id()); + } + }; + } + + // pub async fn spawn_as_receiver_old(context: P2PContext, peer_addr: SocketAddr, stream: TcpStream) { + // let table = context.table.clone(); + // match handshake::as_receiver(context, peer_addr, stream).await { + // Ok(mut conn) => conn.start(table, true).await, + // Err(e) => { + // debug!("Error creating tcp connection with peer at {peer_addr}: {e}") + // } + // } + // } +} + +impl GenServer for RLPxConnection { + type InMsg = InMessage; + type OutMsg = MsgResult; + type State = RLPxConnectionState; + type Error = RLPxError; + + fn new() -> Self { + Self {} + } + + + async fn handle_call( + &mut self, + message: Self::InMsg, + _tx: &Sender>, + state: &mut Self::State, + ) -> CallResponse { + match message.clone() { + InMessage::PeerMessage(message) => { + let _ = handle_message(state, message).await; + CallResponse::Reply(Ok(OutMessage::Done)) + }, + InMessage::BroadcastMessage => todo!(), + InMessage::BackendMessage => todo!(), + InMessage::PeriodicCheck => todo!(), + } + } + + async fn handle_cast( + &mut self, + _message: Self::InMsg, + _tx: &Sender>, + _state: &mut Self::State, + ) -> CastResponse { + CastResponse::NoReply + } +} + +async fn tcp_stream(addr: SocketAddr) -> Result { + TcpSocket::new_v4()?.connect(addr).await +} + +async fn init(state: &mut RLPxConnectionState) { + log_peer_debug(&state.node, "Starting RLPx connection"); + + if let Err(reason) = post_handshake_checks(state.table.clone()).await { + connection_failed( + state, + "Post handshake validations failed", + RLPxError::DisconnectSent(reason), + ) + .await; + return; + } + + if let Err(e) = exchange_hello_messages(state).await { + connection_failed(state, "Hello messages exchange failed", e) + .await; + } else { + // Handshake OK: handle connection + // Create channels to communicate directly to the peer + let (peer_channels, sender, receiver) = PeerChannels::create(); + + // NOTE: if the peer came from the discovery server it will already be inserted in the table + // but that might not always be the case, so we try to add it to the table + // Note: we don't ping the node we let the validation service do its job + { + let mut table_lock = state.table.lock().await; + table_lock.insert_node_forced(state.node.clone()); + table_lock.init_backend_communication( + state.node.node_id(), + peer_channels, + state.capabilities.clone(), + state.inbound, + ); + } + // TODO Handle this unwrap + let _ = init_peer_conn(state).await.unwrap(); + log_peer_debug(&state.node, "Started peer main loop"); + // Subscribe this connection to the broadcasting channel. 
+ let mut broadcaster_receive = if state.negotiated_eth_capability.is_some() { + Some(state.connection_broadcast_send.subscribe()) + } else { + None + }; + // Send transaction hashes from mempool at connection start + send_new_pooled_tx_hashes(state).await.unwrap(); + + // TCP listener loop + let framed = state.framed.clone(); + let node = state.node.clone(); + + // if let Err(e) = self.connection_loop(sender, receiver).await { + // self.connection_failed("Error during RLPx connection", e, state.table.clone()) + // .await; + // } + } +} + +async fn send_new_pooled_tx_hashes(state: &mut RLPxConnectionState) -> Result<(), RLPxError> { + if SUPPORTED_ETH_CAPABILITIES + .iter() + .any(|cap| state.capabilities.contains(cap)) + { + let filter = + |tx: &Transaction| -> bool { !state.broadcasted_txs.contains(&tx.compute_hash()) }; + let txs: Vec = state + .blockchain + .mempool + .filter_transactions_with_filter_fn(&filter)? + .into_values() + .flatten() + .collect(); + if !txs.is_empty() { + let tx_count = txs.len(); + for tx in txs { + send(state, Message::NewPooledTransactionHashes( + NewPooledTransactionHashes::new(vec![(*tx).clone()], &state.blockchain)?, + )) + .await?; + // Possible improvement: the mempool already knows the hash but the filter function does not return it + state.broadcasted_txs.insert((*tx).compute_hash()); + } + log_peer_debug( + &state.node, + &format!("Sent {} transactions to peer", tx_count), + ); + } + } + Ok(()) +} + +// async fn connection_loop( +// &mut self, +// sender: tokio::sync::mpsc::Sender, +// mut receiver: tokio::sync::mpsc::Receiver, +// ) -> Result<(), RLPxError> { + +// // Start listening for messages, +// loop { +// tokio::select! { +// // Expect a message from the remote peer +// Some(message) = self.receive() => { +// match message { +// Ok(message) => { +// log_peer_debug(&self.node, &format!("Received message {}", message)); +// self.handle_message(message, sender.clone()).await?; +// }, +// Err(e) => { +// log_peer_debug(&self.node, &format!("Received RLPX Error in msg {}", e)); +// return Err(e); +// } +// } +// } +// // Expect a message from the backend +// Some(message) = receiver.recv() => { +// log_peer_debug(&self.node, &format!("Sending message {}", message)); +// self.send(message).await?; +// } +// // This is not ideal, but using the receiver without +// // this function call, causes the loop to take ownership +// // of the variable and the compiler will complain about it, +// // with this function, we avoid that. +// // If the broadcaster is Some (i.e. we're connected to a peer that supports an eth protocol), +// // we'll receive broadcasted messages from other connections through a channel, otherwise +// // the function below will yield immediately but the select will not match and +// // ignore the returned value. +// Some(broadcasted_msg) = Self::maybe_wait_for_broadcaster(&mut broadcaster_receive) => { +// self.handle_broadcast(broadcasted_msg?).await? 
+// } +// // Allow an interruption to check periodic tasks +// _ = sleep(PERIODIC_TASKS_CHECK_INTERVAL) => (), // noop +// } +// self.check_periodic_tasks().await?; +// } +// } + +async fn init_peer_conn(state: &mut RLPxConnectionState) -> Result<(), RLPxError> { + // Sending eth Status if peer supports it + if let Some(eth) = state.negotiated_eth_capability.clone() { + let status = backend::get_status(&state.storage, eth.version).await?; + log_peer_debug(&state.node, "Sending status"); + send(state, Message::Status(status)).await?; + // The next immediate message in the ETH protocol is the + // status, reference here: + // https://github.com/ethereum/devp2p/blob/master/caps/eth.md#status-0x00 + let msg = match receive(state).await { + Some(msg) => msg?, + None => return Err(RLPxError::Disconnected()), + }; + match msg { + Message::Status(msg_data) => { + log_peer_debug(&state.node, "Received Status"); + backend::validate_status(msg_data, &state.storage, eth.version).await? + } + Message::Disconnect(disconnect) => { + return Err(RLPxError::HandshakeError(format!( + "Peer disconnected due to: {}", + disconnect.reason() + ))) + } + _ => { + return Err(RLPxError::HandshakeError( + "Expected a Status message".to_string(), + )) + } + } + } + + Ok(()) +} + +async fn post_handshake_checks( + table: Arc>, +) -> Result<(), DisconnectReason> { + // Check if connected peers exceed the limit + let peer_count = { + let table_lock = table.lock().await; + table_lock.count_connected_peers() + }; + + if peer_count >= MAX_PEERS_TCP_CONNECTIONS { + return Err(DisconnectReason::TooManyPeers); + } + + Ok(()) +} + +async fn send_disconnect_message(state: &mut RLPxConnectionState, reason: Option) { + send(state, Message::Disconnect(DisconnectMessage { reason })) + .await + .unwrap_or_else(|_| { + log_peer_debug( + &state.node, + &format!("Could not send Disconnect message: ({:?}).", reason), + ); + }); +} + +async fn connection_failed( + state: &mut RLPxConnectionState, + error_text: &str, + error: RLPxError, +) { + log_peer_debug(&state.node, &format!("{error_text}: ({error})")); + + // Send disconnect message only if error is different than RLPxError::DisconnectRequested + // because if it is a DisconnectRequested error it means that the peer requested the disconnection, not us. 
+ if !matches!(error, RLPxError::DisconnectReceived(_)) { + send_disconnect_message(state, match_disconnect_reason(&error)) + .await; + } + + // Discard peer from kademlia table in some cases + match error { + // already connected, don't discard it + RLPxError::DisconnectReceived(DisconnectReason::AlreadyConnected) + | RLPxError::DisconnectSent(DisconnectReason::AlreadyConnected) => { + log_peer_debug(&state.node, "Peer already connected, don't replace it"); + } + _ => { + let remote_public_key = state.node.public_key; + log_peer_debug( + &state.node, + &format!("{error_text}: ({error}), discarding peer {remote_public_key}"), + ); + state.table.lock().await.replace_peer(state.node.node_id()); + } + } + + let _ = state.framed.lock().await.close().await; +} + +fn match_disconnect_reason(error: &RLPxError) -> Option { + match error { + RLPxError::DisconnectSent(reason) => Some(*reason), + RLPxError::DisconnectReceived(reason) => Some(*reason), + RLPxError::RLPDecodeError(_) => Some(DisconnectReason::NetworkError), + // TODO build a proper matching between error types and disconnection reasons + _ => None, + } +} + +async fn exchange_hello_messages(state: &mut RLPxConnectionState) -> Result<(), RLPxError> { + let supported_capabilities: Vec = [ + &SUPPORTED_ETH_CAPABILITIES[..], + &SUPPORTED_SNAP_CAPABILITIES[..], + &SUPPORTED_P2P_CAPABILITIES[..], + ] + .concat(); + let hello_msg = Message::Hello(p2p::HelloMessage::new( + supported_capabilities, + PublicKey::from(state.signer.verifying_key()), + state.client_version.clone(), + )); + + send(state, hello_msg).await?; + + // Receive Hello message + let msg = match receive(state).await { + Some(msg) => msg?, + None => return Err(RLPxError::Disconnected()), + }; + + match msg { + Message::Hello(hello_message) => { + let mut negotiated_eth_version = 0; + let mut negotiated_snap_version = 0; + + log_peer_debug( + &state.node, + &format!( + "Hello message capabilities {:?}", + hello_message.capabilities + ), + ); + + // Check if we have any capability in common and store the highest version + for cap in &hello_message.capabilities { + match cap.protocol { + "eth" => { + if SUPPORTED_ETH_CAPABILITIES.contains(cap) + && cap.version > negotiated_eth_version + { + negotiated_eth_version = cap.version; + } + } + "snap" => { + if SUPPORTED_SNAP_CAPABILITIES.contains(cap) + && cap.version > negotiated_snap_version + { + negotiated_snap_version = cap.version; + } + } + _ => {} + } + } + + state.capabilities = hello_message.capabilities; + + if negotiated_eth_version == 0 { + return Err(RLPxError::NoMatchingCapabilities()); + } + debug!("Negotiated eth version: eth/{}", negotiated_eth_version); + state.negotiated_eth_capability = Some(Capability::eth(negotiated_eth_version)); + + if negotiated_snap_version != 0 { + debug!("Negotiated snap version: snap/{}", negotiated_snap_version); + state.negotiated_snap_capability = + Some(Capability::snap(negotiated_snap_version)); + } + + state.node.version = Some(hello_message.client_id); + + Ok(()) + } + Message::Disconnect(disconnect) => { + Err(RLPxError::DisconnectReceived(disconnect.reason())) + } + _ => { + // Fail if it is not a Hello message + Err(RLPxError::BadRequest("Expected Hello message".to_string())) + } + } +} + +async fn send(state: &mut RLPxConnectionState, message: Message) -> Result<(), RLPxError> { + state.framed.lock().await.send(message).await +} + +/// Reads from the frame until a frame is available. +/// +/// Returns `None` when the stream buffer is 0. 
This could indicate that the client has disconnected, +/// but we cannot safely assume an EOF, as per the Tokio documentation. +/// +/// If the handshake has not been established, it is reasonable to terminate the connection. +/// +/// For an established connection, [`check_periodic_task`] will detect actual disconnections +/// while sending pings and you should not assume a disconnection. +/// +/// See [`Framed::new`] for more details. +async fn receive(state: &mut RLPxConnectionState) -> Option> { + state.framed.lock().await.next().await +} + +fn spawn_listener(mut conn: RLPxConnectionHandle, node: Node, framed: Arc>>) { + spawned_rt::tasks::spawn(async move { + loop { + match framed.lock().await.next().await { + Some(message) => match message { + Ok(message) => { + log_peer_debug(&node, &format!("Received message {}", message)); + conn.call(InMessage::PeerMessage(message)).await; + return Ok(()) + }, + Err(e) => { + log_peer_debug(&node, &format!("Received RLPX Error in msg {}", e)); + return Err(e); + } + } + None => todo!(), + } + } + }); +} + +async fn handle_message( + state: &mut RLPxConnectionState, + message: Message, +) -> Result<(), RLPxError> { + let peer_supports_eth = state.negotiated_eth_capability.is_some(); + match message { + Message::Disconnect(msg_data) => { + log_peer_debug( + &state.node, + &format!("Received Disconnect: {}", msg_data.reason()), + ); + // TODO handle the disconnection request + return Err(RLPxError::DisconnectReceived(msg_data.reason())); + } + Message::Ping(_) => { + log_peer_debug(&state.node, "Sending pong message"); + send(state, Message::Pong(PongMessage {})).await?; + } + Message::Pong(_) => { + // We ignore received Pong messages + } + Message::Status(msg_data) => { + if let Some(eth) = &state.negotiated_eth_capability { + backend::validate_status(msg_data, &state.storage, eth.version).await? + }; + } + Message::GetAccountRange(req) => { + let response = process_account_range_request(req, state.storage.clone())?; + send(state, Message::AccountRange(response)).await? + } + // TODO(#1129) Add the transaction to the mempool once received. + Message::Transactions(txs) if peer_supports_eth => { + if state.blockchain.is_synced() { + let mut valid_txs = vec![]; + for tx in &txs.transactions { + if let Err(e) = state.blockchain.add_transaction_to_pool(tx.clone()).await { + log_peer_warn(&state.node, &format!("Error adding transaction: {}", e)); + continue; + } + valid_txs.push(tx.clone()); + } + broadcast_message(state, Message::Transactions(Transactions::new(valid_txs)))?; + } + } + Message::GetBlockHeaders(msg_data) if peer_supports_eth => { + let response = BlockHeaders { + id: msg_data.id, + block_headers: msg_data.fetch_headers(&state.storage).await, + }; + send(state, Message::BlockHeaders(response)).await?; + } + Message::GetBlockBodies(msg_data) if peer_supports_eth => { + let response = BlockBodies { + id: msg_data.id, + block_bodies: msg_data.fetch_blocks(&state.storage).await, + }; + send(state, Message::BlockBodies(response)).await?; + } + Message::GetReceipts(GetReceipts { id, block_hashes }) if peer_supports_eth => { + let mut receipts = Vec::new(); + for hash in block_hashes.iter() { + receipts.push(state.storage.get_receipts_for_block(hash)?); + } + let response = Receipts { id, receipts }; + send(state, Message::Receipts(response)).await?; + } + Message::NewPooledTransactionHashes(new_pooled_transaction_hashes) + if peer_supports_eth => + { + //TODO(#1415): evaluate keeping track of requests to avoid sending the same twice. 
+ let hashes = + new_pooled_transaction_hashes.get_transactions_to_request(&state.blockchain)?; + + //TODO(#1416): Evaluate keeping track of the request-id. + let request = GetPooledTransactions::new(random(), hashes); + send(state, Message::GetPooledTransactions(request)).await?; + } + Message::GetPooledTransactions(msg) => { + let response = msg.handle(&state.blockchain)?; + send(state, Message::PooledTransactions(response)).await?; + } + Message::PooledTransactions(msg) if peer_supports_eth => { + if state.blockchain.is_synced() { + msg.handle(&state.node, &state.blockchain).await?; + } + } + Message::GetStorageRanges(req) => { + let response = process_storage_ranges_request(req, state.storage.clone())?; + send(state, Message::StorageRanges(response)).await? + } + Message::GetByteCodes(req) => { + let response = process_byte_codes_request(req, state.storage.clone())?; + send(state, Message::ByteCodes(response)).await? + } + Message::GetTrieNodes(req) => { + let response = process_trie_nodes_request(req, state.storage.clone())?; + send(state, Message::TrieNodes(response)).await? + } + // Send response messages to the backend + // message @ Message::AccountRange(_) + // | message @ Message::StorageRanges(_) + // | message @ Message::ByteCodes(_) + // | message @ Message::TrieNodes(_) + // | message @ Message::BlockBodies(_) + // | message @ Message::BlockHeaders(_) + // | message @ Message::Receipts(_) => sender.send(message).await?, + // TODO: Add new message types and handlers as they are implemented + message => return Err(RLPxError::MessageNotHandled(format!("{message}"))), + }; + Ok(()) +} + +fn broadcast_message(state: &RLPxConnectionState, msg: Message) -> Result<(), RLPxError> { + match msg { + txs_msg @ Message::Transactions(_) => { + let txs = Arc::new(txs_msg); + let task_id = tokio::task::id(); + let Ok(_) = state.connection_broadcast_send.send((task_id, txs)) else { + let error_message = "Could not broadcast received transactions"; + log_peer_error(&state.node, error_message); + return Err(RLPxError::BroadcastError(error_message.to_owned())); + }; + Ok(()) + } + msg => { + let error_message = format!("Broadcasting for msg: {msg} is not supported"); + log_peer_error(&state.node, &error_message); + Err(RLPxError::BroadcastError(error_message)) + } + } +} \ No newline at end of file diff --git a/crates/networking/p2p/rlpx/error.rs b/crates/networking/p2p/rlpx/error.rs index 363e3a02e9..1307e8f5f7 100644 --- a/crates/networking/p2p/rlpx/error.rs +++ b/crates/networking/p2p/rlpx/error.rs @@ -8,7 +8,7 @@ use super::{message::Message, p2p::DisconnectReason}; // TODO improve errors #[derive(Debug, Error)] -pub(crate) enum RLPxError { +pub enum RLPxError { #[error("{0}")] HandshakeError(String), #[error("No matching capabilities")] diff --git a/crates/networking/p2p/rlpx/eth/receipts.rs b/crates/networking/p2p/rlpx/eth/receipts.rs index 09b62301b5..521e629a43 100644 --- a/crates/networking/p2p/rlpx/eth/receipts.rs +++ b/crates/networking/p2p/rlpx/eth/receipts.rs @@ -10,7 +10,7 @@ use ethrex_rlp::{ }; // https://github.com/ethereum/devp2p/blob/master/caps/eth.md#getreceipts-0x0f -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) struct GetReceipts { // id is a u64 chosen by the requesting peer, the responding peer must mirror the value for the response // https://github.com/ethereum/devp2p/blob/master/caps/eth.md#protocol-messages @@ -49,7 +49,7 @@ impl RLPxMessage for GetReceipts { } // https://github.com/ethereum/devp2p/blob/master/caps/eth.md#receipts-0x10 -#[derive(Debug)] 
+#[derive(Debug, Clone)] pub(crate) struct Receipts { // id is a u64 chosen by the requesting peer, the responding peer must mirror the value for the response // https://github.com/ethereum/devp2p/blob/master/caps/eth.md#protocol-messages diff --git a/crates/networking/p2p/rlpx/eth/status.rs b/crates/networking/p2p/rlpx/eth/status.rs index 575aca833f..17e7fc4923 100644 --- a/crates/networking/p2p/rlpx/eth/status.rs +++ b/crates/networking/p2p/rlpx/eth/status.rs @@ -12,7 +12,7 @@ use ethrex_rlp::{ structs::{Decoder, Encoder}, }; -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) struct StatusMessage { pub(crate) eth_version: u32, pub(crate) network_id: u64, diff --git a/crates/networking/p2p/rlpx/eth/transactions.rs b/crates/networking/p2p/rlpx/eth/transactions.rs index 2f0e84372e..e2eb588fc2 100644 --- a/crates/networking/p2p/rlpx/eth/transactions.rs +++ b/crates/networking/p2p/rlpx/eth/transactions.rs @@ -65,7 +65,7 @@ impl RLPxMessage for Transactions { // https://github.com/ethereum/devp2p/blob/master/caps/eth.md#newpooledtransactionhashes-0x08 // Broadcast message -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) struct NewPooledTransactionHashes { transaction_types: Bytes, transaction_sizes: Vec, @@ -161,7 +161,7 @@ impl RLPxMessage for NewPooledTransactionHashes { } // https://github.com/ethereum/devp2p/blob/master/caps/eth.md#getpooledtransactions-0x09 -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) struct GetPooledTransactions { // id is a u64 chosen by the requesting peer, the responding peer must mirror the value for the response // https://github.com/ethereum/devp2p/blob/master/caps/eth.md#protocol-messages @@ -257,7 +257,7 @@ impl RLPxMessage for GetPooledTransactions { } // https://github.com/ethereum/devp2p/blob/master/caps/eth.md#pooledtransactions-0x0a -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) struct PooledTransactions { // id is a u64 chosen by the requesting peer, the responding peer must mirror the value for the response // https://github.com/ethereum/devp2p/blob/master/caps/eth.md#protocol-messages diff --git a/crates/networking/p2p/rlpx/message.rs b/crates/networking/p2p/rlpx/message.rs index cd6a2e53c9..9848591c1d 100644 --- a/crates/networking/p2p/rlpx/message.rs +++ b/crates/networking/p2p/rlpx/message.rs @@ -26,7 +26,7 @@ pub trait RLPxMessage: Sized { fn decode(msg_data: &[u8]) -> Result; } -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) enum Message { Hello(HelloMessage), Disconnect(DisconnectMessage), diff --git a/crates/networking/p2p/rlpx/p2p.rs b/crates/networking/p2p/rlpx/p2p.rs index 13169374ec..cd54b15fa0 100644 --- a/crates/networking/p2p/rlpx/p2p.rs +++ b/crates/networking/p2p/rlpx/p2p.rs @@ -78,7 +78,7 @@ impl Serialize for Capability { } } -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) struct HelloMessage { pub(crate) capabilities: Vec, pub(crate) node_id: PublicKey, @@ -211,7 +211,7 @@ impl From for u8 { val as u8 } } -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) struct DisconnectMessage { pub(crate) reason: Option, } @@ -269,7 +269,7 @@ impl RLPxMessage for DisconnectMessage { } } -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) struct PingMessage {} impl RLPxMessage for PingMessage { @@ -294,7 +294,7 @@ impl RLPxMessage for PingMessage { } } -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) struct PongMessage {} impl RLPxMessage for PongMessage { diff --git a/crates/networking/p2p/rlpx/snap.rs b/crates/networking/p2p/rlpx/snap.rs index 16a8fcae09..7e4d60c82a 100644 --- 
a/crates/networking/p2p/rlpx/snap.rs +++ b/crates/networking/p2p/rlpx/snap.rs @@ -16,7 +16,7 @@ use ethrex_rlp::{ // Snap Capability Messages -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) struct GetAccountRange { // id is a u64 chosen by the requesting peer, the responding peer must mirror the value for the response pub id: u64, @@ -26,7 +26,7 @@ pub(crate) struct GetAccountRange { pub response_bytes: u64, } -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) struct AccountRange { // id is a u64 chosen by the requesting peer, the responding peer must mirror the value for the response pub id: u64, @@ -34,7 +34,7 @@ pub(crate) struct AccountRange { pub proof: Vec, } -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) struct GetStorageRanges { pub id: u64, pub root_hash: H256, @@ -44,27 +44,27 @@ pub(crate) struct GetStorageRanges { pub response_bytes: u64, } -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) struct StorageRanges { pub id: u64, pub slots: Vec>, pub proof: Vec, } -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) struct GetByteCodes { pub id: u64, pub hashes: Vec, pub bytes: u64, } -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) struct ByteCodes { pub id: u64, pub codes: Vec, } -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) struct GetTrieNodes { pub id: u64, pub root_hash: H256, @@ -74,7 +74,7 @@ pub(crate) struct GetTrieNodes { pub bytes: u64, } -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) struct TrieNodes { pub id: u64, pub nodes: Vec, @@ -334,13 +334,13 @@ impl RLPxMessage for TrieNodes { // Intermediate structures -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct AccountRangeUnit { pub hash: H256, pub account: AccountStateSlim, } -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct AccountStateSlim { pub nonce: u64, pub balance: U256, @@ -348,7 +348,7 @@ pub struct AccountStateSlim { pub code_hash: Bytes, } -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct StorageSlot { pub hash: H256, pub data: U256, diff --git a/crates/networking/p2p/rlpx/utils.rs b/crates/networking/p2p/rlpx/utils.rs index f02afa3ffa..2924fd144c 100644 --- a/crates/networking/p2p/rlpx/utils.rs +++ b/crates/networking/p2p/rlpx/utils.rs @@ -7,7 +7,7 @@ use k256::{ }; use sha3::{Digest, Keccak256}; use snap::raw::{max_compress_len, Decoder as SnappyDecoder, Encoder as SnappyEncoder}; -use tracing::{debug, error, warn}; +use tracing::{debug, error, info, warn}; pub fn sha256(data: &[u8]) -> [u8; 32] { use k256::sha2::Digest; @@ -73,7 +73,7 @@ pub fn snappy_decompress(msg_data: &[u8]) -> Result, RLPDecodeError> { } pub(crate) fn log_peer_debug(node: &Node, text: &str) { - debug!("[{0}]: {1}", node, text) + info!("[{0}]: {1}", node, text) } pub(crate) fn log_peer_error(node: &Node, text: &str) { From 4983a1a81acd1cb4d8d088454856746911e0e881 Mon Sep 17 00:00:00 2001 From: Esteban Dimitroff Hodi Date: Wed, 11 Jun 2025 11:16:40 -0300 Subject: [PATCH 02/40] Moved handshake into connection spawned process --- crates/l2/sequencer/l1_watcher.rs | 4 +- .../p2p/rlpx/connection/handshake.rs | 129 ++-- .../networking/p2p/rlpx/connection/server.rs | 577 ++++++++++-------- crates/networking/p2p/rlpx/error.rs | 2 + 4 files changed, 411 insertions(+), 301 deletions(-) diff --git a/crates/l2/sequencer/l1_watcher.rs b/crates/l2/sequencer/l1_watcher.rs index 9f1f3c33d7..a814af81e9 100644 --- a/crates/l2/sequencer/l1_watcher.rs +++ b/crates/l2/sequencer/l1_watcher.rs @@ -16,8 +16,8 @@ use tracing::{debug, error, info, warn}; use super::utils::random_duration; -use 
spawned_concurrency::{send_after, CallResponse, CastResponse, GenServer, GenServerInMsg}; -use spawned_rt::mpsc::Sender; +use spawned_concurrency::tasks::{send_after, CallResponse, CastResponse, GenServer, GenServerInMsg}; +use spawned_rt::tasks::mpsc::Sender; #[derive(Clone)] pub struct L1WatcherState { diff --git a/crates/networking/p2p/rlpx/connection/handshake.rs b/crates/networking/p2p/rlpx/connection/handshake.rs index e4466693b2..14a3f540bf 100644 --- a/crates/networking/p2p/rlpx/connection/handshake.rs +++ b/crates/networking/p2p/rlpx/connection/handshake.rs @@ -1,6 +1,8 @@ +use std::{collections::HashSet, sync::Arc, time::Instant}; + use crate::{ - network::P2PContext, rlpx::{ + connection::server::{Established, InnerState}, error::RLPxError, utils::{ compress_pubkey, decompress_pubkey, ecdh_xchng, kdf, log_peer_debug, sha256, @@ -24,17 +26,20 @@ use k256::{ }; use rand::Rng; use sha3::{Digest, Keccak256}; -use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; +use tokio::{io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}, net::TcpStream, sync::Mutex}; use tokio_util::codec::Framed; use tracing::info; -use super::codec::RLPxCodec; +use super::{codec::RLPxCodec, server::RLPxConnectionState}; type Aes128Ctr64BE = ctr::Ctr64BE; // https://github.com/ethereum/go-ethereum/blob/master/p2p/peer.go#L44 pub const P2P_MAX_MESSAGE_SIZE: usize = 2048; +const PERIODIC_TX_BROADCAST_INTERVAL: std::time::Duration = std::time::Duration::from_millis(500); +const PERIODIC_TASKS_CHECK_INTERVAL: std::time::Duration = std::time::Duration::from_millis(500); + pub(crate) struct RemoteState { pub(crate) public_key: H512, pub(crate) nonce: H256, @@ -48,38 +53,92 @@ pub(crate) struct LocalState { pub(crate) init_message: Vec, } -pub(crate) async fn new_as_receiver(context: P2PContext, mut stream: S) -> Result<(Framed, H512), RLPxError> -where - S: AsyncRead + AsyncWrite + std::marker::Unpin, -{ - info!("Starting handshake as receiver!"); - let remote_state = receive_auth(&context.signer, &mut stream).await?; - let local_state = send_ack(remote_state.public_key, &mut stream).await?; - let hashed_nonces: [u8; 32] = - Keccak256::digest([local_state.nonce.0, remote_state.nonce.0].concat()).into(); - let codec = RLPxCodec::new(&local_state, &remote_state, hashed_nonces); - Ok((Framed::new(stream, codec), remote_state.public_key)) -} - -pub(crate) async fn new_as_initiator( - context: P2PContext, - node: &Node, - mut stream: S, -) -> Result, RLPxError> -where - S: AsyncRead + AsyncWrite + std::marker::Unpin, -{ - - info!("Starting handshake as initiator!"); - let local_state = send_auth(&context.signer, node.public_key, &mut stream).await?; - let remote_state = receive_ack(&context.signer, node.public_key, &mut stream).await?; - // Local node is initator - // keccak256(nonce || initiator-nonce) - let hashed_nonces: [u8; 32] = - Keccak256::digest([remote_state.nonce.0, local_state.nonce.0].concat()).into(); - let codec = RLPxCodec::new(&local_state, &remote_state, hashed_nonces); - log_peer_debug(node, "Completed handshake as initiator!"); - Ok(Framed::new(stream, codec)) +pub(crate) async fn perform( + state: &mut RLPxConnectionState, + mut stream: TcpStream, +) -> Result { + match &state.0 { + InnerState::Initiator(initiator) => { + info!("Starting handshake as initiator!"); + let context = &initiator.context; + let local_state = send_auth( + &context.signer, + initiator.node.public_key, + &mut stream, + ) + .await?; + let remote_state = receive_ack( + &context.signer, + 
initiator.node.public_key, + &mut stream, + ) + .await?; + // Local node is initator + // keccak256(nonce || initiator-nonce) + let hashed_nonces: [u8; 32] = + Keccak256::digest([remote_state.nonce.0, local_state.nonce.0].concat()).into(); + let codec = RLPxCodec::new(&local_state, &remote_state, hashed_nonces); + log_peer_debug(&initiator.node, "Completed handshake as initiator!"); + Ok(Established { + signer: context.signer.clone(), + framed: Arc::new(Mutex::new(Framed::new(stream, codec))), + node: initiator.node.clone(), + storage: context.storage.clone(), + blockchain: context.blockchain.clone(), + capabilities: vec![], + negotiated_eth_capability: None, + negotiated_snap_capability: None, + next_periodic_ping: Instant::now() + PERIODIC_TASKS_CHECK_INTERVAL, + next_tx_broadcast: Instant::now() + PERIODIC_TX_BROADCAST_INTERVAL, + broadcasted_txs: HashSet::new(), + client_version: context.client_version.clone(), + connection_broadcast_send: context.broadcast.clone(), + table: context.table.clone(), + inbound: false, + }) + } + InnerState::Receiver(receiver) => { + info!("Starting handshake as receiver!"); + let context = &receiver.context; + let remote_state = + receive_auth(&context.signer, &mut stream).await?; + let local_state = send_ack(remote_state.public_key, &mut stream).await?; + // Remote node is initiator + // keccak256(nonce || initiator-nonce) + let hashed_nonces: [u8; 32] = + Keccak256::digest([local_state.nonce.0, remote_state.nonce.0].concat()).into(); + let codec = RLPxCodec::new(&local_state, &remote_state, hashed_nonces); + let peer_addr = receiver.peer_addr; + let node = Node::new( + peer_addr.ip(), + peer_addr.port(), + peer_addr.port(), + remote_state.public_key, + ); + Ok(Established { + signer: context.signer.clone(), + framed: Arc::new(Mutex::new(Framed::new(stream, codec))), + node: node, + storage: context.storage.clone(), + blockchain: context.blockchain.clone(), + capabilities: vec![], + negotiated_eth_capability: None, + negotiated_snap_capability: None, + next_periodic_ping: Instant::now() + PERIODIC_TASKS_CHECK_INTERVAL, + next_tx_broadcast: Instant::now() + PERIODIC_TX_BROADCAST_INTERVAL, + broadcasted_txs: HashSet::new(), + client_version: context.client_version.clone(), + connection_broadcast_send: context.broadcast.clone(), + table: context.table.clone(), + inbound: true, + }) + } + InnerState::Established(_) => { + return Err(RLPxError::StateError( + "Already established".to_string(), + )) + } + } } async fn send_auth( diff --git a/crates/networking/p2p/rlpx/connection/server.rs b/crates/networking/p2p/rlpx/connection/server.rs index 4599cd3454..d679eb5be9 100644 --- a/crates/networking/p2p/rlpx/connection/server.rs +++ b/crates/networking/p2p/rlpx/connection/server.rs @@ -1,44 +1,113 @@ use std::{collections::HashSet, net::SocketAddr, sync::Arc, time::Instant}; use ethrex_blockchain::Blockchain; -use ethrex_common::{types::{MempoolTransaction, Transaction}, H256}; +use ethrex_common::{ + types::{MempoolTransaction, Transaction}, + H256, +}; use ethrex_storage::Store; +use futures::SinkExt; use k256::{ecdsa::SigningKey, PublicKey}; use rand::random; -use spawned_concurrency::tasks::{CallResponse, CastResponse, GenServer, GenServerHandle, GenServerInMsg}; +use spawned_concurrency::tasks::{ + CallResponse, CastResponse, GenServer, GenServerHandle, GenServerInMsg, +}; use spawned_rt::tasks::mpsc::Sender; -use tokio::{net::{TcpSocket, TcpStream}, sync::{broadcast, Mutex}}; -use tokio_util::codec::Framed; +use tokio::{ + net::{TcpSocket, TcpStream}, + 
sync::{broadcast, Mutex}, +}; use tokio_stream::StreamExt; -use futures::SinkExt; -use tracing::{debug, error}; - -use crate::{discv4::server::MAX_PEERS_TCP_CONNECTIONS, kademlia::{KademliaTable, PeerChannels}, network::P2PContext, rlpx::{error::RLPxError, eth::{backend, blocks::{BlockBodies, BlockHeaders}, receipts::{GetReceipts, Receipts}, transactions::{GetPooledTransactions, NewPooledTransactionHashes, Transactions}}, message::Message, p2p::{self, Capability, DisconnectMessage, DisconnectReason, PongMessage, SUPPORTED_ETH_CAPABILITIES, SUPPORTED_P2P_CAPABILITIES, SUPPORTED_SNAP_CAPABILITIES}, utils::{log_peer_debug, log_peer_error, log_peer_warn}}, snap::{process_account_range_request, process_byte_codes_request, process_storage_ranges_request, process_trie_nodes_request}, types::Node}; +use tokio_util::codec::Framed; +use tracing::{debug, error, info}; + +use crate::{ + discv4::server::MAX_PEERS_TCP_CONNECTIONS, + kademlia::{KademliaTable, PeerChannels}, + network::P2PContext, + rlpx::{ + error::RLPxError, + eth::{ + backend, + blocks::{BlockBodies, BlockHeaders}, + receipts::{GetReceipts, Receipts}, + transactions::{GetPooledTransactions, NewPooledTransactionHashes, Transactions}, + }, + message::Message, + p2p::{ + self, Capability, DisconnectMessage, DisconnectReason, PongMessage, + SUPPORTED_ETH_CAPABILITIES, SUPPORTED_P2P_CAPABILITIES, SUPPORTED_SNAP_CAPABILITIES, + }, + utils::{log_peer_debug, log_peer_error, log_peer_warn}, + }, + snap::{ + process_account_range_request, process_byte_codes_request, process_storage_ranges_request, + process_trie_nodes_request, + }, + types::Node, +}; use super::{codec::RLPxCodec, handshake}; pub(crate) type RLPxConnBroadcastSender = broadcast::Sender<(tokio::task::Id, Arc)>; -const PERIODIC_TX_BROADCAST_INTERVAL: std::time::Duration = std::time::Duration::from_millis(500); -const PERIODIC_TASKS_CHECK_INTERVAL: std::time::Duration = std::time::Duration::from_millis(500); - type MsgResult = Result; type RLPxConnectionHandle = GenServerHandle; #[derive(Clone)] -pub struct RLPxConnectionState { +pub struct RLPxConnectionState(pub InnerState); + +#[derive(Clone)] +pub struct Initiator { + pub(crate) context: P2PContext, + pub(crate) node: Node, +} + + +#[derive(Clone)] +pub struct Receiver { + pub(crate) context: P2PContext, + pub(crate) peer_addr: SocketAddr, +} + +#[derive(Clone)] +pub struct Established { + pub(crate) signer: SigningKey, + pub(crate) framed: Arc>>, + pub(crate) node: Node, + pub(crate) storage: Store, + pub(crate) blockchain: Arc, + pub(crate) capabilities: Vec, + pub(crate) negotiated_eth_capability: Option, + pub(crate) negotiated_snap_capability: Option, + pub(crate) next_periodic_ping: Instant, + pub(crate) next_tx_broadcast: Instant, + pub(crate) broadcasted_txs: HashSet, + pub(crate) client_version: String, + pub(crate) connection_broadcast_send: RLPxConnBroadcastSender, + pub(crate) table: Arc>, + pub(crate) inbound: bool, +} + +#[derive(Clone)] +pub enum InnerState { + Initiator(Initiator), + Receiver(Receiver), + Established(Established), +} +pub struct RLPxConnectionStatez { pub(crate) signer: SigningKey, - pub(crate) node: Node, - pub(crate) framed: Arc>>, - pub(crate) storage: Store, - pub(crate) blockchain: Arc, - pub(crate) capabilities: Vec, - pub(crate) negotiated_eth_capability: Option, - pub(crate) negotiated_snap_capability: Option, - pub(crate) next_periodic_ping: Instant, - pub(crate) next_tx_broadcast: Instant, - pub(crate) broadcasted_txs: HashSet, - pub(crate) client_version: String, + pub(crate) node: 
Node, + pub(crate) framed: Arc>>, + pub(crate) storage: Store, + pub(crate) blockchain: Arc, + pub(crate) capabilities: Vec, + pub(crate) negotiated_eth_capability: Option, + pub(crate) negotiated_snap_capability: Option, + pub(crate) next_periodic_ping: Instant, + pub(crate) next_tx_broadcast: Instant, + pub(crate) broadcasted_txs: HashSet, + pub(crate) client_version: String, //// Send end of the channel used to broadcast messages //// to other connected peers, is ok to have it here, //// since internally it's an Arc. @@ -53,89 +122,65 @@ pub struct RLPxConnectionState { } impl RLPxConnectionState { - pub async fn new_as_receiver(context: P2PContext, peer_addr: SocketAddr, stream: TcpStream) -> Result { - let (framed, remote_key) = handshake::new_as_receiver(context.clone(), stream).await?; - let node = Node::new( - peer_addr.ip(), - peer_addr.port(), - peer_addr.port(), - remote_key, - ); - Ok(Self{ - signer: context.signer, - node, - framed: Arc::new(Mutex::new(framed)), - storage: context.storage, - blockchain: context.blockchain, - capabilities: vec![], - negotiated_eth_capability: None, - negotiated_snap_capability: None, - next_periodic_ping: Instant::now() + PERIODIC_TASKS_CHECK_INTERVAL, - next_tx_broadcast: Instant::now() + PERIODIC_TX_BROADCAST_INTERVAL, - broadcasted_txs: HashSet::new(), - client_version: context.client_version, - connection_broadcast_send: context.broadcast, - table: context.table, - inbound: true, - }) + pub fn new_as_receiver(context: P2PContext, peer_addr: SocketAddr) -> Self { + Self(InnerState::Receiver(Receiver { + context, + peer_addr, + })) } - pub async fn new_as_initiator(context: P2PContext, node: &Node, stream: TcpStream) -> Result { - let framed = handshake::new_as_initiator(context.clone(), node, stream).await?; - Ok(Self{ - signer: context.signer, + pub fn new_as_initiator( + context: P2PContext, + node: &Node, + ) -> Self { + Self(InnerState::Initiator(Initiator { + context, node: node.clone(), - framed: Arc::new(Mutex::new(framed)), - storage: context.storage, - blockchain: context.blockchain, - capabilities: vec![], - negotiated_eth_capability: None, - negotiated_snap_capability: None, - next_periodic_ping: Instant::now() + PERIODIC_TASKS_CHECK_INTERVAL, - next_tx_broadcast: Instant::now() + PERIODIC_TX_BROADCAST_INTERVAL, - broadcasted_txs: HashSet::new(), - client_version: context.client_version, - connection_broadcast_send: context.broadcast, - table: context.table, - inbound: false, - }) + })) } } -#[derive(Clone)] pub enum InMessage { + Init(TcpStream), PeerMessage(Message), BroadcastMessage, BackendMessage, PeriodicCheck, } -#[allow(dead_code)] -#[derive(Clone, PartialEq)] +#[derive(Clone)] pub enum OutMessage { + InitResponse { + node: Node, + framed: Arc>> + }, Done, Error, } - + pub struct RLPxConnection {} impl RLPxConnection { - pub async fn spawn_as_receiver(context: P2PContext, peer_addr: SocketAddr, stream: TcpStream) { - match RLPxConnectionState::new_as_receiver(context, peer_addr, stream).await { - Ok(mut state) => { - init(&mut state).await; - let node = state.node.clone(); - let framed = state.framed.clone(); - let conn = RLPxConnection::start(state); - // Send Init message to perform post handshake and initial checks. 
+ info!("spawn_as_receiver"); + let state = RLPxConnectionState::new_as_receiver(context, peer_addr); + info!("r new state"); + let mut conn = RLPxConnection::start(state); + info!("r connected"); + match conn.call(InMessage::Init(stream)).await { + Ok(Ok(OutMessage::InitResponse { node, framed })) => { + info!("r listener"); spawn_listener(conn, node, framed); - } - Err(error) => error!("Error starting RLPxConnection: {}", error), - }; + info!("r done"); + }, + Ok(Ok(_)) => error!("Unexpected response from connection"), + Ok(Err(error)) => error!("Error starting RLPxConnection: {:?}", error), + Err(error) => error!("Unhandled error starting RLPxConnection: {:?}", error), + } } pub async fn spawn_as_initiator(context: P2PContext, node: &Node) { + info!("spawn_as_initiator"); let addr = SocketAddr::new(node.ip, node.tcp_port); let stream = match tcp_stream(addr).await { Ok(result) => result, @@ -145,32 +190,22 @@ impl RLPxConnection { return; } }; - let table = context.table.clone(); - match RLPxConnectionState::new_as_initiator(context, node, stream).await { - Ok(mut state) => { - init(&mut state).await; - let node = state.node.clone(); - let framed = state.framed.clone(); - let conn = RLPxConnection::start(state); - // Send Init message to perform post handshake and initial checks. + info!("i stream"); + let state = RLPxConnectionState::new_as_initiator(context, node); + info!("i new state"); + let mut conn = RLPxConnection::start(state.clone()); + info!("i connected"); + match conn.call(InMessage::Init(stream)).await { + Ok(Ok(OutMessage::InitResponse { node, framed })) => { + info!("i listener"); spawn_listener(conn, node, framed); - } - Err(error) => { - log_peer_debug(node, &format!("Error starting RLPxConnection: {error}")); - table.lock().await.replace_peer(node.node_id()); - } - }; + info!("i done"); + }, + Ok(Ok(_)) => error!("Unexpected response from connection"), + Ok(Err(error)) => error!("Error starting RLPxConnection: {:?}", error), + Err(error) => error!("Unhandled error starting RLPxConnection: {:?}", error), + } } - - // pub async fn spawn_as_receiver_old(context: P2PContext, peer_addr: SocketAddr, stream: TcpStream) { - // let table = context.table.clone(); - // match handshake::as_receiver(context, peer_addr, stream).await { - // Ok(mut conn) => conn.start(table, true).await, - // Err(e) => { - // debug!("Error creating tcp connection with peer at {peer_addr}: {e}") - // } - // } - // } } impl GenServer for RLPxConnection { @@ -183,18 +218,23 @@ impl GenServer for RLPxConnection { Self {} } - async fn handle_call( &mut self, message: Self::InMsg, _tx: &Sender>, state: &mut Self::State, ) -> CallResponse { - match message.clone() { + match message { + InMessage::Init(stream) => { + match init(state, stream).await { + Ok((node, framed)) => CallResponse::Reply(Ok(OutMessage::InitResponse{node, framed})), + Err(e) => CallResponse::Reply(Err(e)), + } + } InMessage::PeerMessage(message) => { let _ = handle_message(state, message).await; CallResponse::Reply(Ok(OutMessage::Done)) - }, + } InMessage::BroadcastMessage => todo!(), InMessage::BackendMessage => todo!(), InMessage::PeriodicCheck => todo!(), @@ -215,22 +255,23 @@ async fn tcp_stream(addr: SocketAddr) -> Result { TcpSocket::new_v4()?.connect(addr).await } -async fn init(state: &mut RLPxConnectionState) { - log_peer_debug(&state.node, "Starting RLPx connection"); - - if let Err(reason) = post_handshake_checks(state.table.clone()).await { +async fn init(state: &mut RLPxConnectionState, stream: TcpStream) -> Result<(Node, 
Arc>>), RLPxError> { + // TODO handle unwrap() + let mut established_state = handshake::perform(state, stream).await.unwrap(); + log_peer_debug(&established_state.node, "Starting RLPx connection"); + if let Err(reason) = post_handshake_checks(established_state.table.clone()).await { connection_failed( - state, + &mut established_state, "Post handshake validations failed", RLPxError::DisconnectSent(reason), ) .await; - return; + return Err(RLPxError::Disconnected()); } - if let Err(e) = exchange_hello_messages(state).await { - connection_failed(state, "Hello messages exchange failed", e) - .await; + if let Err(e) = exchange_hello_messages(&mut established_state).await { + connection_failed(&mut established_state, "Hello messages exchange failed", e).await; + return Err(RLPxError::Disconnected()); } else { // Handshake OK: handle connection // Create channels to communicate directly to the peer @@ -240,39 +281,36 @@ async fn init(state: &mut RLPxConnectionState) { // but that might not always be the case, so we try to add it to the table // Note: we don't ping the node we let the validation service do its job { - let mut table_lock = state.table.lock().await; - table_lock.insert_node_forced(state.node.clone()); + let mut table_lock = established_state.table.lock().await; + table_lock.insert_node_forced(established_state.node.clone()); table_lock.init_backend_communication( - state.node.node_id(), + established_state.node.node_id(), peer_channels, - state.capabilities.clone(), - state.inbound, + established_state.capabilities.clone(), + established_state.inbound, ); } // TODO Handle this unwrap - let _ = init_peer_conn(state).await.unwrap(); - log_peer_debug(&state.node, "Started peer main loop"); + let _ = init_peer_conn(&mut established_state).await.unwrap(); + log_peer_debug(&established_state.node, "Started peer main loop"); // Subscribe this connection to the broadcasting channel. 
- let mut broadcaster_receive = if state.negotiated_eth_capability.is_some() { - Some(state.connection_broadcast_send.subscribe()) + let mut broadcaster_receive = if established_state.negotiated_eth_capability.is_some() { + Some(established_state.connection_broadcast_send.clone().subscribe()) } else { None }; // Send transactions transaction hashes from mempool at connection start - send_new_pooled_tx_hashes(state).await.unwrap(); - - // TCP listener loop - let framed = state.framed.clone(); - let node = state.node.clone(); - - // if let Err(e) = self.connection_loop(sender, receiver).await { - // self.connection_failed("Error during RLPx connection", e, state.table.clone()) - // .await; - // } + send_new_pooled_tx_hashes(&mut established_state).await.unwrap(); + + let node = established_state.clone().node; + let framed = established_state.clone().framed; + // New state + state.0 = InnerState::Established(established_state); + Ok((node, framed)) } } -async fn send_new_pooled_tx_hashes(state: &mut RLPxConnectionState) -> Result<(), RLPxError> { +async fn send_new_pooled_tx_hashes(state: &mut Established) -> Result<(), RLPxError> { if SUPPORTED_ETH_CAPABILITIES .iter() .any(|cap| state.capabilities.contains(cap)) @@ -289,9 +327,13 @@ async fn send_new_pooled_tx_hashes(state: &mut RLPxConnectionState) -> Result<() if !txs.is_empty() { let tx_count = txs.len(); for tx in txs { - send(state, Message::NewPooledTransactionHashes( - NewPooledTransactionHashes::new(vec![(*tx).clone()], &state.blockchain)?, - )) + send( + state, + Message::NewPooledTransactionHashes(NewPooledTransactionHashes::new( + vec![(*tx).clone()], + &state.blockchain, + )?), + ) .await?; // Possible improvement: the mempool already knows the hash but the filter function does not return it state.broadcasted_txs.insert((*tx).compute_hash()); @@ -350,7 +392,7 @@ async fn send_new_pooled_tx_hashes(state: &mut RLPxConnectionState) -> Result<() // } // } -async fn init_peer_conn(state: &mut RLPxConnectionState) -> Result<(), RLPxError> { +async fn init_peer_conn(state: &mut Established) -> Result<(), RLPxError> { // Sending eth Status if peer supports it if let Some(eth) = state.negotiated_eth_capability.clone() { let status = backend::get_status(&state.storage, eth.version).await?; @@ -401,7 +443,10 @@ async fn post_handshake_checks( Ok(()) } -async fn send_disconnect_message(state: &mut RLPxConnectionState, reason: Option) { +async fn send_disconnect_message( + state: &mut Established, + reason: Option, +) { send(state, Message::Disconnect(DisconnectMessage { reason })) .await .unwrap_or_else(|_| { @@ -412,18 +457,13 @@ async fn send_disconnect_message(state: &mut RLPxConnectionState, reason: Option }); } -async fn connection_failed( - state: &mut RLPxConnectionState, - error_text: &str, - error: RLPxError, -) { +async fn connection_failed(state: &mut Established, error_text: &str, error: RLPxError) { log_peer_debug(&state.node, &format!("{error_text}: ({error})")); // Send disconnect message only if error is different than RLPxError::DisconnectRequested // because if it is a DisconnectRequested error it means that the peer requested the disconnection, not us. 
if !matches!(error, RLPxError::DisconnectReceived(_)) { - send_disconnect_message(state, match_disconnect_reason(&error)) - .await; + send_disconnect_message(state, match_disconnect_reason(&error)).await; } // Discard peer from kademlia table in some cases @@ -456,7 +496,7 @@ fn match_disconnect_reason(error: &RLPxError) -> Option { } } -async fn exchange_hello_messages(state: &mut RLPxConnectionState) -> Result<(), RLPxError> { +async fn exchange_hello_messages(state: &mut Established) -> Result<(), RLPxError> { let supported_capabilities: Vec = [ &SUPPORTED_ETH_CAPABILITIES[..], &SUPPORTED_SNAP_CAPABILITIES[..], @@ -521,17 +561,14 @@ async fn exchange_hello_messages(state: &mut RLPxConnectionState) -> Result<(), if negotiated_snap_version != 0 { debug!("Negotatied snap version: snap/{}", negotiated_snap_version); - state.negotiated_snap_capability = - Some(Capability::snap(negotiated_snap_version)); + state.negotiated_snap_capability = Some(Capability::snap(negotiated_snap_version)); } state.node.version = Some(hello_message.client_id); Ok(()) } - Message::Disconnect(disconnect) => { - Err(RLPxError::DisconnectReceived(disconnect.reason())) - } + Message::Disconnect(disconnect) => Err(RLPxError::DisconnectReceived(disconnect.reason())), _ => { // Fail if it is not a hello message Err(RLPxError::BadRequest("Expected Hello message".to_string())) @@ -539,7 +576,7 @@ async fn exchange_hello_messages(state: &mut RLPxConnectionState) -> Result<(), } } -async fn send(state: &mut RLPxConnectionState, message: Message) -> Result<(), RLPxError> { +async fn send(state: &mut Established, message: Message) -> Result<(), RLPxError> { state.framed.lock().await.send(message).await } @@ -554,11 +591,15 @@ async fn send(state: &mut RLPxConnectionState, message: Message) -> Result<(), R /// while sending pings and you should not assume a disconnection. /// /// See [`Framed::new`] for more details. -async fn receive(state: &mut RLPxConnectionState) -> Option> { +async fn receive(state: &mut Established) -> Option> { state.framed.lock().await.next().await } -fn spawn_listener(mut conn: RLPxConnectionHandle, node: Node, framed: Arc>>) { +fn spawn_listener( + mut conn: RLPxConnectionHandle, + node: Node, + framed: Arc>>, +) { spawned_rt::tasks::spawn(async move { loop { match framed.lock().await.next().await { @@ -566,13 +607,13 @@ fn spawn_listener(mut conn: RLPxConnectionHandle, node: Node, framed: Arc { log_peer_debug(&node, &format!("Received message {}", message)); conn.call(InMessage::PeerMessage(message)).await; - return Ok(()) - }, + return Ok(()); + } Err(e) => { log_peer_debug(&node, &format!("Received RLPX Error in msg {}", e)); return Err(e); } - } + }, None => todo!(), } } @@ -583,115 +624,123 @@ async fn handle_message( state: &mut RLPxConnectionState, message: Message, ) -> Result<(), RLPxError> { - let peer_supports_eth = state.negotiated_eth_capability.is_some(); - match message { - Message::Disconnect(msg_data) => { - log_peer_debug( - &state.node, - &format!("Received Disconnect: {}", msg_data.reason()), - ); - // TODO handle the disconnection request - return Err(RLPxError::DisconnectReceived(msg_data.reason())); - } - Message::Ping(_) => { - log_peer_debug(&state.node, "Sending pong message"); - send(state, Message::Pong(PongMessage {})).await?; - } - Message::Pong(_) => { - // We ignore received Pong messages - } - Message::Status(msg_data) => { - if let Some(eth) = &state.negotiated_eth_capability { - backend::validate_status(msg_data, &state.storage, eth.version).await? 
- }; - } - Message::GetAccountRange(req) => { - let response = process_account_range_request(req, state.storage.clone())?; - send(state, Message::AccountRange(response)).await? - } - // TODO(#1129) Add the transaction to the mempool once received. - Message::Transactions(txs) if peer_supports_eth => { - if state.blockchain.is_synced() { - let mut valid_txs = vec![]; - for tx in &txs.transactions { - if let Err(e) = state.blockchain.add_transaction_to_pool(tx.clone()).await { - log_peer_warn(&state.node, &format!("Error adding transaction: {}", e)); - continue; + if let InnerState::Established(mut established_state) = state.0.clone() { + inner_handle(&mut established_state, message).await?; + state.0 = InnerState::Established(established_state); + Ok(()) + } else { + Err(RLPxError::StateError("Not established".to_string())) + } +} + +async fn inner_handle(established_state: &mut Established, message: Message) -> Result<(), RLPxError> { + let peer_supports_eth = established_state.negotiated_eth_capability.is_some(); + match message { + Message::Disconnect(msg_data) => { + log_peer_debug( + &established_state.node, + &format!("Received Disconnect: {}", msg_data.reason()), + ); + // TODO handle the disconnection request + return Err(RLPxError::DisconnectReceived(msg_data.reason())); + } + Message::Ping(_) => { + log_peer_debug(&established_state.node, "Sending pong message"); + send(established_state, Message::Pong(PongMessage {})).await?; + } + Message::Pong(_) => { + // We ignore received Pong messages + } + Message::Status(msg_data) => { + if let Some(eth) = &established_state.negotiated_eth_capability { + backend::validate_status(msg_data, &established_state.storage, eth.version).await? + }; + } + Message::GetAccountRange(req) => { + let response = process_account_range_request(req, established_state.storage.clone())?; + send(established_state, Message::AccountRange(response)).await? + } + // TODO(#1129) Add the transaction to the mempool once received. 
+ Message::Transactions(txs) if peer_supports_eth => { + if established_state.blockchain.is_synced() { + let mut valid_txs = vec![]; + for tx in &txs.transactions { + if let Err(e) = established_state.blockchain.add_transaction_to_pool(tx.clone()).await { + log_peer_warn(&established_state.node, &format!("Error adding transaction: {}", e)); + continue; + } + valid_txs.push(tx.clone()); } - valid_txs.push(tx.clone()); + broadcast_message(established_state, Message::Transactions(Transactions::new(valid_txs)))?; } - broadcast_message(state, Message::Transactions(Transactions::new(valid_txs)))?; } - } - Message::GetBlockHeaders(msg_data) if peer_supports_eth => { - let response = BlockHeaders { - id: msg_data.id, - block_headers: msg_data.fetch_headers(&state.storage).await, - }; - send(state, Message::BlockHeaders(response)).await?; - } - Message::GetBlockBodies(msg_data) if peer_supports_eth => { - let response = BlockBodies { - id: msg_data.id, - block_bodies: msg_data.fetch_blocks(&state.storage).await, - }; - send(state, Message::BlockBodies(response)).await?; - } - Message::GetReceipts(GetReceipts { id, block_hashes }) if peer_supports_eth => { - let mut receipts = Vec::new(); - for hash in block_hashes.iter() { - receipts.push(state.storage.get_receipts_for_block(hash)?); + Message::GetBlockHeaders(msg_data) if peer_supports_eth => { + let response = BlockHeaders { + id: msg_data.id, + block_headers: msg_data.fetch_headers(&established_state.storage).await, + }; + send(established_state, Message::BlockHeaders(response)).await?; } - let response = Receipts { id, receipts }; - send(state, Message::Receipts(response)).await?; - } - Message::NewPooledTransactionHashes(new_pooled_transaction_hashes) - if peer_supports_eth => - { - //TODO(#1415): evaluate keeping track of requests to avoid sending the same twice. - let hashes = - new_pooled_transaction_hashes.get_transactions_to_request(&state.blockchain)?; - - //TODO(#1416): Evaluate keeping track of the request-id. - let request = GetPooledTransactions::new(random(), hashes); - send(state, Message::GetPooledTransactions(request)).await?; - } - Message::GetPooledTransactions(msg) => { - let response = msg.handle(&state.blockchain)?; - send(state, Message::PooledTransactions(response)).await?; - } - Message::PooledTransactions(msg) if peer_supports_eth => { - if state.blockchain.is_synced() { - msg.handle(&state.node, &state.blockchain).await?; + Message::GetBlockBodies(msg_data) if peer_supports_eth => { + let response = BlockBodies { + id: msg_data.id, + block_bodies: msg_data.fetch_blocks(&established_state.storage).await, + }; + send(established_state, Message::BlockBodies(response)).await?; } - } - Message::GetStorageRanges(req) => { - let response = process_storage_ranges_request(req, state.storage.clone())?; - send(state, Message::StorageRanges(response)).await? - } - Message::GetByteCodes(req) => { - let response = process_byte_codes_request(req, state.storage.clone())?; - send(state, Message::ByteCodes(response)).await? - } - Message::GetTrieNodes(req) => { - let response = process_trie_nodes_request(req, state.storage.clone())?; - send(state, Message::TrieNodes(response)).await? 
- } - // Send response messages to the backend - // message @ Message::AccountRange(_) - // | message @ Message::StorageRanges(_) - // | message @ Message::ByteCodes(_) - // | message @ Message::TrieNodes(_) - // | message @ Message::BlockBodies(_) - // | message @ Message::BlockHeaders(_) - // | message @ Message::Receipts(_) => sender.send(message).await?, - // TODO: Add new message types and handlers as they are implemented - message => return Err(RLPxError::MessageNotHandled(format!("{message}"))), - }; - Ok(()) + Message::GetReceipts(GetReceipts { id, block_hashes }) if peer_supports_eth => { + let mut receipts = Vec::new(); + for hash in block_hashes.iter() { + receipts.push(established_state.storage.get_receipts_for_block(hash)?); + } + let response = Receipts { id, receipts }; + send(established_state, Message::Receipts(response)).await?; + } + Message::NewPooledTransactionHashes(new_pooled_transaction_hashes) if peer_supports_eth => { + //TODO(#1415): evaluate keeping track of requests to avoid sending the same twice. + let hashes = + new_pooled_transaction_hashes.get_transactions_to_request(&established_state.blockchain)?; + + //TODO(#1416): Evaluate keeping track of the request-id. + let request = GetPooledTransactions::new(random(), hashes); + send(established_state, Message::GetPooledTransactions(request)).await?; + } + Message::GetPooledTransactions(msg) => { + let response = msg.handle(&established_state.blockchain)?; + send(established_state, Message::PooledTransactions(response)).await?; + } + Message::PooledTransactions(msg) if peer_supports_eth => { + if established_state.blockchain.is_synced() { + msg.handle(&established_state.node, &established_state.blockchain).await?; + } + } + Message::GetStorageRanges(req) => { + let response = process_storage_ranges_request(req, established_state.storage.clone())?; + send(established_state, Message::StorageRanges(response)).await? + } + Message::GetByteCodes(req) => { + let response = process_byte_codes_request(req, established_state.storage.clone())?; + send(established_state, Message::ByteCodes(response)).await? + } + Message::GetTrieNodes(req) => { + let response = process_trie_nodes_request(req, established_state.storage.clone())?; + send(established_state, Message::TrieNodes(response)).await? 
+ } + // Send response messages to the backend + // message @ Message::AccountRange(_) + // | message @ Message::StorageRanges(_) + // | message @ Message::ByteCodes(_) + // | message @ Message::TrieNodes(_) + // | message @ Message::BlockBodies(_) + // | message @ Message::BlockHeaders(_) + // | message @ Message::Receipts(_) => sender.send(message).await?, + // TODO: Add new message types and handlers as they are implemented + message => return Err(RLPxError::MessageNotHandled(format!("{message}"))), + }; + Ok(()) } -fn broadcast_message(state: &RLPxConnectionState, msg: Message) -> Result<(), RLPxError> { +fn broadcast_message(state: &Established, msg: Message) -> Result<(), RLPxError> { match msg { txs_msg @ Message::Transactions(_) => { let txs = Arc::new(txs_msg); @@ -709,4 +758,4 @@ fn broadcast_message(state: &RLPxConnectionState, msg: Message) -> Result<(), RL Err(RLPxError::BroadcastError(error_message)) } } -} \ No newline at end of file +} diff --git a/crates/networking/p2p/rlpx/error.rs b/crates/networking/p2p/rlpx/error.rs index 1307e8f5f7..cb7f8cfd2f 100644 --- a/crates/networking/p2p/rlpx/error.rs +++ b/crates/networking/p2p/rlpx/error.rs @@ -11,6 +11,8 @@ use super::{message::Message, p2p::DisconnectReason}; pub enum RLPxError { #[error("{0}")] HandshakeError(String), + #[error("Invalid connection state: {0}")] + StateError(String), #[error("No matching capabilities")] NoMatchingCapabilities(), #[error("Peer disconnected")] From ee013d6fbab55bfef34dcb8b3f5edfa9804f7c22 Mon Sep 17 00:00:00 2001 From: Esteban Dimitroff Hodi Date: Fri, 13 Jun 2025 15:50:57 -0300 Subject: [PATCH 03/40] Connected backend channels and added periodic checks --- Cargo.lock | 4 +- Cargo.toml | 4 +- crates/l2/sequencer/l1_watcher.rs | 17 +- crates/networking/p2p/discv4/server.rs | 2 +- crates/networking/p2p/kademlia.rs | 14 +- crates/networking/p2p/network.rs | 4 +- crates/networking/p2p/peer_handler.rs | 91 ++-- .../networking/p2p/rlpx/connection/codec.rs | 2 +- .../p2p/rlpx/connection/handshake.rs | 44 +- crates/networking/p2p/rlpx/connection/mod.rs | 4 +- .../networking/p2p/rlpx/connection/server.rs | 430 ++++++++++-------- 11 files changed, 346 insertions(+), 270 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c07c8c3e43..cf57f6abbc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9535,7 +9535,7 @@ dependencies = [ [[package]] name = "spawned-concurrency" version = "0.1.0" -source = "git+https://github.com/lambdaclass/spawned.git?rev=ded9a1f1d41b82020481913ad821e8cfae2b9f19#ded9a1f1d41b82020481913ad821e8cfae2b9f19" +source = "git+https://github.com/lambdaclass/spawned.git?rev=0d1c4bbb7fd771c407e9506b4f4dd8374b6ae0bf#0d1c4bbb7fd771c407e9506b4f4dd8374b6ae0bf" dependencies = [ "futures", "spawned-rt", @@ -9545,7 +9545,7 @@ dependencies = [ [[package]] name = "spawned-rt" version = "0.1.0" -source = "git+https://github.com/lambdaclass/spawned.git?rev=ded9a1f1d41b82020481913ad821e8cfae2b9f19#ded9a1f1d41b82020481913ad821e8cfae2b9f19" +source = "git+https://github.com/lambdaclass/spawned.git?rev=0d1c4bbb7fd771c407e9506b4f4dd8374b6ae0bf#0d1c4bbb7fd771c407e9506b4f4dd8374b6ae0bf" dependencies = [ "crossbeam 0.7.3", "tokio", diff --git a/Cargo.toml b/Cargo.toml index eb9fb395ee..fe6c13b21b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -88,5 +88,5 @@ libsecp256k1 = "0.7.2" clap = { version = "4.3", features = ["derive", "env"] } clap_complete = "4.5.17" eyre = "0.6.12" -spawned-concurrency = {git = "https://github.com/lambdaclass/spawned.git", rev = "ded9a1f1d41b82020481913ad821e8cfae2b9f19"} -spawned-rt 
= {git = "https://github.com/lambdaclass/spawned.git", rev = "ded9a1f1d41b82020481913ad821e8cfae2b9f19"} +spawned-concurrency = {git = "https://github.com/lambdaclass/spawned.git", rev = "0d1c4bbb7fd771c407e9506b4f4dd8374b6ae0bf"} +spawned-rt = {git = "https://github.com/lambdaclass/spawned.git", rev = "0d1c4bbb7fd771c407e9506b4f4dd8374b6ae0bf"} diff --git a/crates/l2/sequencer/l1_watcher.rs b/crates/l2/sequencer/l1_watcher.rs index a814af81e9..977168a62f 100644 --- a/crates/l2/sequencer/l1_watcher.rs +++ b/crates/l2/sequencer/l1_watcher.rs @@ -16,7 +16,9 @@ use tracing::{debug, error, info, warn}; use super::utils::random_duration; -use spawned_concurrency::tasks::{send_after, CallResponse, CastResponse, GenServer, GenServerInMsg}; +use spawned_concurrency::tasks::{ + send_after, CallResponse, CastResponse, GenServer, GenServerInMsg, +}; use spawned_rt::tasks::mpsc::Sender; #[derive(Clone)] @@ -84,7 +86,8 @@ impl L1Watcher { } impl GenServer for L1Watcher { - type InMsg = InMessage; + type CastMsg = InMessage; + type CallMsg = (); type OutMsg = OutMessage; type State = L1WatcherState; type Error = L1WatcherError; @@ -95,8 +98,8 @@ impl GenServer for L1Watcher { async fn handle_call( &mut self, - _message: Self::InMsg, - _tx: &Sender>, + _message: Self::CallMsg, + _handle: &GenServerHandle, _state: &mut Self::State, ) -> CallResponse { CallResponse::Reply(OutMessage::Done) @@ -104,14 +107,14 @@ impl GenServer for L1Watcher { async fn handle_cast( &mut self, - message: Self::InMsg, - tx: &Sender>, + message: Self::CastMsg, + handle: &GenServerHandle, state: &mut Self::State, ) -> CastResponse { match message { Self::InMsg::Watch => { let check_interval = random_duration(state.check_interval); - send_after(check_interval, tx.clone(), Self::InMsg::Watch); + send_after(check_interval, handle.clone(), Self::InMsg::Watch); match get_logs(state).await { Ok(logs) => { // We may not have a deposit nor a withdrawal, that means no events -> no logs. 
diff --git a/crates/networking/p2p/discv4/server.rs b/crates/networking/p2p/discv4/server.rs index 588dccf7e3..24797859f8 100644 --- a/crates/networking/p2p/discv4/server.rs +++ b/crates/networking/p2p/discv4/server.rs @@ -517,7 +517,7 @@ impl Discv4Server { if active_connections >= MAX_PEERS_TCP_CONNECTIONS { return Ok(()); } - + RLPxConnection::spawn_as_initiator(self.ctx.clone(), &peer.node).await; Ok(()) diff --git a/crates/networking/p2p/kademlia.rs b/crates/networking/p2p/kademlia.rs index f7d1d5540b..3cf7193637 100644 --- a/crates/networking/p2p/kademlia.rs +++ b/crates/networking/p2p/kademlia.rs @@ -2,10 +2,11 @@ use std::sync::Arc; use crate::{ discv4::messages::FindNodeRequest, - rlpx::{message::Message as RLPxMessage, p2p::Capability}, + rlpx::{connection::server::RLPxConnection, message::Message as RLPxMessage, p2p::Capability}, types::{Node, NodeRecord}, }; use ethrex_common::{H256, U256}; +use spawned_concurrency::tasks::GenServerHandle; use tokio::sync::mpsc::UnboundedSender; use tokio::sync::{mpsc, Mutex}; use tracing::debug; @@ -414,25 +415,24 @@ pub const MAX_MESSAGES_IN_PEER_CHANNEL: usize = 25; #[derive(Debug, Clone)] /// Holds the respective sender and receiver ends of the communication channels bewteen the peer data and its active connection pub struct PeerChannels { - pub(crate) sender: mpsc::Sender, + pub(crate) connection: GenServerHandle, pub(crate) receiver: Arc>>, } impl PeerChannels { /// Sets up the communication channels for the peer /// Returns the channel endpoints to send to the active connection's listen loop - pub(crate) fn create() -> (Self, mpsc::Sender, mpsc::Receiver) { - let (sender, connection_receiver) = - mpsc::channel::(MAX_MESSAGES_IN_PEER_CHANNEL); + pub(crate) fn create( + connection: GenServerHandle, + ) -> (Self, mpsc::Sender) { let (connection_sender, receiver) = mpsc::channel::(MAX_MESSAGES_IN_PEER_CHANNEL); ( Self { - sender, + connection, receiver: Arc::new(Mutex::new(receiver)), }, connection_sender, - connection_receiver, ) } } diff --git a/crates/networking/p2p/network.rs b/crates/networking/p2p/network.rs index 0276f0408e..b8edfa0be4 100644 --- a/crates/networking/p2p/network.rs +++ b/crates/networking/p2p/network.rs @@ -1,9 +1,9 @@ +use crate::discv4::server::{DiscoveryError, Discv4Server}; use crate::kademlia::{self, KademliaTable}; use crate::rlpx::connection::server::{RLPxConnBroadcastSender, RLPxConnection}; -use crate::rlpx::p2p::SUPPORTED_SNAP_CAPABILITIES; use crate::rlpx::message::Message as RLPxMessage; +use crate::rlpx::p2p::SUPPORTED_SNAP_CAPABILITIES; use crate::types::{Node, NodeRecord}; -use crate::discv4::server::{DiscoveryError, Discv4Server}; use ethrex_blockchain::Blockchain; use ethrex_common::{H256, H512}; use ethrex_storage::Store; diff --git a/crates/networking/p2p/peer_handler.rs b/crates/networking/p2p/peer_handler.rs index 035fa86e22..40e3671123 100644 --- a/crates/networking/p2p/peer_handler.rs +++ b/crates/networking/p2p/peer_handler.rs @@ -13,6 +13,7 @@ use tokio::sync::Mutex; use crate::{ kademlia::{KademliaTable, PeerChannels, PeerData}, rlpx::{ + connection::server::CastMessage, eth::{ blocks::{ BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders, BLOCK_HEADER_LIMIT, @@ -105,12 +106,16 @@ impl PeerHandler { skip: 0, reverse: matches!(order, BlockRequestOrder::NewToOld), }); - let (peer_id, peer_channel) = self + let (peer_id, mut peer_channel) = self .get_peer_channel_with_retry(&SUPPORTED_ETH_CAPABILITIES) .await?; let mut receiver = peer_channel.receiver.lock().await; - if let Err(err) = 
peer_channel.sender.send(request).await { - debug!("Failed to send message to peer: {err}"); + if let Err(err) = peer_channel + .connection + .cast(CastMessage::BackendMessage(request)) + .await + { + debug!("Failed to send message to peer: {err:?}"); continue; } if let Some(block_headers) = tokio::time::timeout(PEER_REPLY_TIMEOUT, async move { @@ -162,12 +167,16 @@ impl PeerHandler { id: request_id, block_hashes: block_hashes.clone(), }); - let (peer_id, peer_channel) = self + let (peer_id, mut peer_channel) = self .get_peer_channel_with_retry(&SUPPORTED_ETH_CAPABILITIES) .await?; let mut receiver = peer_channel.receiver.lock().await; - if let Err(err) = peer_channel.sender.send(request).await { - debug!("Failed to send message to peer: {err}"); + if let Err(err) = peer_channel + .connection + .cast(CastMessage::BackendMessage(request)) + .await + { + debug!("Failed to send message to peer: {err:?}"); return None; } if let Some(block_bodies) = tokio::time::timeout(PEER_REPLY_TIMEOUT, async move { @@ -275,12 +284,16 @@ impl PeerHandler { id: request_id, block_hashes: block_hashes.clone(), }); - let (_, peer_channel) = self + let (_, mut peer_channel) = self .get_peer_channel_with_retry(&SUPPORTED_ETH_CAPABILITIES) .await?; let mut receiver = peer_channel.receiver.lock().await; - if let Err(err) = peer_channel.sender.send(request).await { - debug!("Failed to send message to peer: {err}"); + if let Err(err) = peer_channel + .connection + .cast(CastMessage::BackendMessage(request)) + .await + { + debug!("Failed to send message to peer: {err:?}"); continue; } if let Some(receipts) = tokio::time::timeout(PEER_REPLY_TIMEOUT, async move { @@ -331,12 +344,16 @@ impl PeerHandler { limit_hash: limit, response_bytes: MAX_RESPONSE_BYTES, }); - let (_, peer_channel) = self + let (_, mut peer_channel) = self .get_peer_channel_with_retry(&SUPPORTED_SNAP_CAPABILITIES) .await?; let mut receiver = peer_channel.receiver.lock().await; - if let Err(err) = peer_channel.sender.send(request).await { - debug!("Failed to send message to peer: {err}"); + if let Err(err) = peer_channel + .connection + .cast(CastMessage::BackendMessage(request)) + .await + { + debug!("Failed to send message to peer: {err:?}"); continue; } if let Some((accounts, proof)) = tokio::time::timeout(PEER_REPLY_TIMEOUT, async move { @@ -394,12 +411,16 @@ impl PeerHandler { hashes: hashes.clone(), bytes: MAX_RESPONSE_BYTES, }); - let (_, peer_channel) = self + let (_, mut peer_channel) = self .get_peer_channel_with_retry(&SUPPORTED_SNAP_CAPABILITIES) .await?; let mut receiver = peer_channel.receiver.lock().await; - if let Err(err) = peer_channel.sender.send(request).await { - debug!("Failed to send message to peer: {err}"); + if let Err(err) = peer_channel + .connection + .cast(CastMessage::BackendMessage(request)) + .await + { + debug!("Failed to send message to peer: {err:?}"); continue; } if let Some(codes) = tokio::time::timeout(PEER_REPLY_TIMEOUT, async move { @@ -451,12 +472,16 @@ impl PeerHandler { limit_hash: HASH_MAX, response_bytes: MAX_RESPONSE_BYTES, }); - let (_, peer_channel) = self + let (_, mut peer_channel) = self .get_peer_channel_with_retry(&SUPPORTED_SNAP_CAPABILITIES) .await?; let mut receiver = peer_channel.receiver.lock().await; - if let Err(err) = peer_channel.sender.send(request).await { - debug!("Failed to send message to peer: {err}"); + if let Err(err) = peer_channel + .connection + .cast(CastMessage::BackendMessage(request)) + .await + { + debug!("Failed to send message to peer: {err:?}"); continue; } if let 
Some((mut slots, proof)) = tokio::time::timeout(PEER_REPLY_TIMEOUT, async move { @@ -552,12 +577,16 @@ impl PeerHandler { .collect(), bytes: MAX_RESPONSE_BYTES, }); - let (_, peer_channel) = self + let (_, mut peer_channel) = self .get_peer_channel_with_retry(&SUPPORTED_SNAP_CAPABILITIES) .await?; let mut receiver = peer_channel.receiver.lock().await; - if let Err(err) = peer_channel.sender.send(request).await { - debug!("Failed to send message to peer: {err}"); + if let Err(err) = peer_channel + .connection + .cast(CastMessage::BackendMessage(request)) + .await + { + debug!("Failed to send message to peer: {err:?}"); continue; } if let Some(nodes) = tokio::time::timeout(PEER_REPLY_TIMEOUT, async move { @@ -626,12 +655,16 @@ impl PeerHandler { .collect(), bytes: MAX_RESPONSE_BYTES, }); - let (_, peer_channel) = self + let (_, mut peer_channel) = self .get_peer_channel_with_retry(&SUPPORTED_SNAP_CAPABILITIES) .await?; let mut receiver = peer_channel.receiver.lock().await; - if let Err(err) = peer_channel.sender.send(request).await { - debug!("Failed to send message to peer: {err}"); + if let Err(err) = peer_channel + .connection + .cast(CastMessage::BackendMessage(request)) + .await + { + debug!("Failed to send message to peer: {err:?}"); continue; } if let Some(nodes) = tokio::time::timeout(PEER_REPLY_TIMEOUT, async move { @@ -693,12 +726,16 @@ impl PeerHandler { limit_hash: HASH_MAX, response_bytes: MAX_RESPONSE_BYTES, }); - let (_, peer_channel) = self + let (_, mut peer_channel) = self .get_peer_channel_with_retry(&SUPPORTED_SNAP_CAPABILITIES) .await?; let mut receiver = peer_channel.receiver.lock().await; - if let Err(err) = peer_channel.sender.send(request).await { - debug!("Failed to send message to peer: {err}"); + if let Err(err) = peer_channel + .connection + .cast(CastMessage::BackendMessage(request)) + .await + { + debug!("Failed to send message to peer: {err:?}"); continue; } if let Some((mut slots, proof)) = tokio::time::timeout(PEER_REPLY_TIMEOUT, async move { diff --git a/crates/networking/p2p/rlpx/connection/codec.rs b/crates/networking/p2p/rlpx/connection/codec.rs index 2df38b1d10..f5c4532fa1 100644 --- a/crates/networking/p2p/rlpx/connection/codec.rs +++ b/crates/networking/p2p/rlpx/connection/codec.rs @@ -1,4 +1,4 @@ -use crate::rlpx::{message as rlpx, error::RLPxError, utils::ecdh_xchng}; +use crate::rlpx::{error::RLPxError, message as rlpx, utils::ecdh_xchng}; use super::handshake::{LocalState, RemoteState}; use aes::{ diff --git a/crates/networking/p2p/rlpx/connection/handshake.rs b/crates/networking/p2p/rlpx/connection/handshake.rs index 14a3f540bf..9ee01f3677 100644 --- a/crates/networking/p2p/rlpx/connection/handshake.rs +++ b/crates/networking/p2p/rlpx/connection/handshake.rs @@ -2,7 +2,9 @@ use std::{collections::HashSet, sync::Arc, time::Instant}; use crate::{ rlpx::{ - connection::server::{Established, InnerState}, + connection::server::{ + Established, InnerState, PERIODIC_PING_INTERVAL, PERIODIC_TX_BROADCAST_INTERVAL, + }, error::RLPxError, utils::{ compress_pubkey, decompress_pubkey, ecdh_xchng, kdf, log_peer_debug, sha256, @@ -26,7 +28,11 @@ use k256::{ }; use rand::Rng; use sha3::{Digest, Keccak256}; -use tokio::{io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}, net::TcpStream, sync::Mutex}; +use tokio::{ + io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}, + net::TcpStream, + sync::Mutex, +}; use tokio_util::codec::Framed; use tracing::info; @@ -37,9 +43,6 @@ type Aes128Ctr64BE = ctr::Ctr64BE; // 
https://github.com/ethereum/go-ethereum/blob/master/p2p/peer.go#L44 pub const P2P_MAX_MESSAGE_SIZE: usize = 2048; -const PERIODIC_TX_BROADCAST_INTERVAL: std::time::Duration = std::time::Duration::from_millis(500); -const PERIODIC_TASKS_CHECK_INTERVAL: std::time::Duration = std::time::Duration::from_millis(500); - pub(crate) struct RemoteState { pub(crate) public_key: H512, pub(crate) nonce: H256, @@ -61,18 +64,10 @@ pub(crate) async fn perform( InnerState::Initiator(initiator) => { info!("Starting handshake as initiator!"); let context = &initiator.context; - let local_state = send_auth( - &context.signer, - initiator.node.public_key, - &mut stream, - ) - .await?; - let remote_state = receive_ack( - &context.signer, - initiator.node.public_key, - &mut stream, - ) - .await?; + let local_state = + send_auth(&context.signer, initiator.node.public_key, &mut stream).await?; + let remote_state = + receive_ack(&context.signer, initiator.node.public_key, &mut stream).await?; // Local node is initator // keccak256(nonce || initiator-nonce) let hashed_nonces: [u8; 32] = @@ -88,24 +83,24 @@ pub(crate) async fn perform( capabilities: vec![], negotiated_eth_capability: None, negotiated_snap_capability: None, - next_periodic_ping: Instant::now() + PERIODIC_TASKS_CHECK_INTERVAL, + next_periodic_ping: Instant::now() + PERIODIC_PING_INTERVAL, next_tx_broadcast: Instant::now() + PERIODIC_TX_BROADCAST_INTERVAL, broadcasted_txs: HashSet::new(), client_version: context.client_version.clone(), connection_broadcast_send: context.broadcast.clone(), table: context.table.clone(), + backend_channel: None, inbound: false, }) } InnerState::Receiver(receiver) => { info!("Starting handshake as receiver!"); let context = &receiver.context; - let remote_state = - receive_auth(&context.signer, &mut stream).await?; + let remote_state = receive_auth(&context.signer, &mut stream).await?; let local_state = send_ack(remote_state.public_key, &mut stream).await?; // Remote node is initiator // keccak256(nonce || initiator-nonce) - let hashed_nonces: [u8; 32] = + let hashed_nonces: [u8; 32] = Keccak256::digest([local_state.nonce.0, remote_state.nonce.0].concat()).into(); let codec = RLPxCodec::new(&local_state, &remote_state, hashed_nonces); let peer_addr = receiver.peer_addr; @@ -124,19 +119,18 @@ pub(crate) async fn perform( capabilities: vec![], negotiated_eth_capability: None, negotiated_snap_capability: None, - next_periodic_ping: Instant::now() + PERIODIC_TASKS_CHECK_INTERVAL, + next_periodic_ping: Instant::now() + PERIODIC_PING_INTERVAL, next_tx_broadcast: Instant::now() + PERIODIC_TX_BROADCAST_INTERVAL, broadcasted_txs: HashSet::new(), client_version: context.client_version.clone(), connection_broadcast_send: context.broadcast.clone(), table: context.table.clone(), + backend_channel: None, inbound: true, }) } InnerState::Established(_) => { - return Err(RLPxError::StateError( - "Already established".to_string(), - )) + return Err(RLPxError::StateError("Already established".to_string())) } } } diff --git a/crates/networking/p2p/rlpx/connection/mod.rs b/crates/networking/p2p/rlpx/connection/mod.rs index c036883080..fcb8ad0ba4 100644 --- a/crates/networking/p2p/rlpx/connection/mod.rs +++ b/crates/networking/p2p/rlpx/connection/mod.rs @@ -1,3 +1,3 @@ -pub mod server; +mod codec; mod handshake; -mod codec; \ No newline at end of file +pub mod server; diff --git a/crates/networking/p2p/rlpx/connection/server.rs b/crates/networking/p2p/rlpx/connection/server.rs index d679eb5be9..d0c486b53b 100644 --- 
a/crates/networking/p2p/rlpx/connection/server.rs +++ b/crates/networking/p2p/rlpx/connection/server.rs @@ -10,12 +10,11 @@ use futures::SinkExt; use k256::{ecdsa::SigningKey, PublicKey}; use rand::random; use spawned_concurrency::tasks::{ - CallResponse, CastResponse, GenServer, GenServerHandle, GenServerInMsg, + send_after, CallResponse, CastResponse, GenServer, GenServerHandle, }; -use spawned_rt::tasks::mpsc::Sender; use tokio::{ net::{TcpSocket, TcpStream}, - sync::{broadcast, Mutex}, + sync::{broadcast, mpsc::Sender, Mutex}, }; use tokio_stream::StreamExt; use tokio_util::codec::Framed; @@ -35,7 +34,7 @@ use crate::{ }, message::Message, p2p::{ - self, Capability, DisconnectMessage, DisconnectReason, PongMessage, + self, Capability, DisconnectMessage, DisconnectReason, PingMessage, PongMessage, SUPPORTED_ETH_CAPABILITIES, SUPPORTED_P2P_CAPABILITIES, SUPPORTED_SNAP_CAPABILITIES, }, utils::{log_peer_debug, log_peer_error, log_peer_warn}, @@ -49,6 +48,11 @@ use crate::{ use super::{codec::RLPxCodec, handshake}; +const PERIODIC_TASKS_CHECK_INTERVAL: std::time::Duration = std::time::Duration::from_millis(500); +pub(crate) const PERIODIC_PING_INTERVAL: std::time::Duration = std::time::Duration::from_secs(10); +pub(crate) const PERIODIC_TX_BROADCAST_INTERVAL: std::time::Duration = + std::time::Duration::from_millis(500); + pub(crate) type RLPxConnBroadcastSender = broadcast::Sender<(tokio::task::Id, Arc)>; type MsgResult = Result; @@ -63,7 +67,6 @@ pub struct Initiator { pub(crate) node: Node, } - #[derive(Clone)] pub struct Receiver { pub(crate) context: P2PContext, @@ -84,8 +87,17 @@ pub struct Established { pub(crate) next_tx_broadcast: Instant, pub(crate) broadcasted_txs: HashSet, pub(crate) client_version: String, + //// Send end of the channel used to broadcast messages + //// to other connected peers, is ok to have it here, + //// since internally it's an Arc. + //// The ID is to ignore the message sent from the same task. + //// This is used both to send messages and to received broadcasted + //// messages from other connections (sent from other peers). + //// The receive end is instantiated after the handshake is completed + //// under `handle_peer`. pub(crate) connection_broadcast_send: RLPxConnBroadcastSender, pub(crate) table: Arc>, + pub(crate) backend_channel: Option>, pub(crate) inbound: bool, } @@ -95,44 +107,13 @@ pub enum InnerState { Receiver(Receiver), Established(Established), } -pub struct RLPxConnectionStatez { - pub(crate) signer: SigningKey, - pub(crate) node: Node, - pub(crate) framed: Arc>>, - pub(crate) storage: Store, - pub(crate) blockchain: Arc, - pub(crate) capabilities: Vec, - pub(crate) negotiated_eth_capability: Option, - pub(crate) negotiated_snap_capability: Option, - pub(crate) next_periodic_ping: Instant, - pub(crate) next_tx_broadcast: Instant, - pub(crate) broadcasted_txs: HashSet, - pub(crate) client_version: String, - //// Send end of the channel used to broadcast messages - //// to other connected peers, is ok to have it here, - //// since internally it's an Arc. - //// The ID is to ignore the message sent from the same task. - //// This is used both to send messages and to received broadcasted - //// messages from other connections (sent from other peers). - //// The receive end is instantiated after the handshake is completed - //// under `handle_peer`. 
- connection_broadcast_send: RLPxConnBroadcastSender, - table: Arc>, - inbound: bool, -} impl RLPxConnectionState { pub fn new_as_receiver(context: P2PContext, peer_addr: SocketAddr) -> Self { - Self(InnerState::Receiver(Receiver { - context, - peer_addr, - })) + Self(InnerState::Receiver(Receiver { context, peer_addr })) } - pub fn new_as_initiator( - context: P2PContext, - node: &Node, - ) -> Self { + pub fn new_as_initiator(context: P2PContext, node: &Node) -> Self { Self(InnerState::Initiator(Initiator { context, node: node.clone(), @@ -140,11 +121,14 @@ impl RLPxConnectionState { } } -pub enum InMessage { +pub enum CallMessage { Init(TcpStream), +} + +pub enum CastMessage { PeerMessage(Message), BroadcastMessage, - BackendMessage, + BackendMessage(Message), PeriodicCheck, } @@ -152,12 +136,13 @@ pub enum InMessage { pub enum OutMessage { InitResponse { node: Node, - framed: Arc>> + framed: Arc>>, }, Done, Error, } +#[derive(Debug)] pub struct RLPxConnection {} impl RLPxConnection { @@ -167,12 +152,12 @@ impl RLPxConnection { info!("r new state"); let mut conn = RLPxConnection::start(state); info!("r connected"); - match conn.call(InMessage::Init(stream)).await { + match conn.call(CallMessage::Init(stream)).await { Ok(Ok(OutMessage::InitResponse { node, framed })) => { info!("r listener"); spawn_listener(conn, node, framed); info!("r done"); - }, + } Ok(Ok(_)) => error!("Unexpected response from connection"), Ok(Err(error)) => error!("Error starting RLPxConnection: {:?}", error), Err(error) => error!("Unhandled error starting RLPxConnection: {:?}", error), @@ -195,12 +180,12 @@ impl RLPxConnection { info!("i new state"); let mut conn = RLPxConnection::start(state.clone()); info!("i connected"); - match conn.call(InMessage::Init(stream)).await { + match conn.call(CallMessage::Init(stream)).await { Ok(Ok(OutMessage::InitResponse { node, framed })) => { info!("i listener"); spawn_listener(conn, node, framed); info!("i done"); - }, + } Ok(Ok(_)) => error!("Unexpected response from connection"), Ok(Err(error)) => error!("Error starting RLPxConnection: {:?}", error), Err(error) => error!("Unhandled error starting RLPxConnection: {:?}", error), @@ -209,7 +194,8 @@ impl RLPxConnection { } impl GenServer for RLPxConnection { - type InMsg = InMessage; + type CallMsg = CallMessage; + type CastMsg = CastMessage; type OutMsg = MsgResult; type State = RLPxConnectionState; type Error = RLPxError; @@ -220,34 +206,48 @@ impl GenServer for RLPxConnection { async fn handle_call( &mut self, - message: Self::InMsg, - _tx: &Sender>, + message: Self::CallMsg, + handle: &RLPxConnectionHandle, state: &mut Self::State, ) -> CallResponse { match message { - InMessage::Init(stream) => { - match init(state, stream).await { - Ok((node, framed)) => CallResponse::Reply(Ok(OutMessage::InitResponse{node, framed})), - Err(e) => CallResponse::Reply(Err(e)), + Self::CallMsg::Init(stream) => match init(state, handle, stream).await { + Ok((node, framed)) => { + CallResponse::Reply(Ok(OutMessage::InitResponse { node, framed })) } - } - InMessage::PeerMessage(message) => { - let _ = handle_message(state, message).await; - CallResponse::Reply(Ok(OutMessage::Done)) - } - InMessage::BroadcastMessage => todo!(), - InMessage::BackendMessage => todo!(), - InMessage::PeriodicCheck => todo!(), + Err(e) => CallResponse::Reply(Err(e)), + }, } } async fn handle_cast( &mut self, - _message: Self::InMsg, - _tx: &Sender>, - _state: &mut Self::State, + message: Self::CastMsg, + handle: &RLPxConnectionHandle, + state: &mut Self::State, ) -> 
CastResponse { - CastResponse::NoReply + if let InnerState::Established(mut established_state) = state.0.clone() { + match message { + // TODO: handle all these "let _" + Self::CastMsg::PeerMessage(message) => { + let _ = handle_peer_message(&mut established_state, message).await; + } + Self::CastMsg::BroadcastMessage => todo!(), + Self::CastMsg::BackendMessage(message) => { + let _ = handle_backend_message(&mut established_state, message).await; + } + Self::CastMsg::PeriodicCheck => { + let _ = check_periodic_tasks(&mut established_state, handle).await; + } + } + // Update the state state + state.0 = InnerState::Established(established_state); + CastResponse::NoReply + } else { + // Received a Cast message but connection is not ready. Log an error but keep the connection alive. + error!("Connection not yet established"); + CastResponse::NoReply + } } } @@ -255,9 +255,12 @@ async fn tcp_stream(addr: SocketAddr) -> Result { TcpSocket::new_v4()?.connect(addr).await } -async fn init(state: &mut RLPxConnectionState, stream: TcpStream) -> Result<(Node, Arc>>), RLPxError> { - // TODO handle unwrap() - let mut established_state = handshake::perform(state, stream).await.unwrap(); +async fn init( + state: &mut RLPxConnectionState, + handle: &RLPxConnectionHandle, + stream: TcpStream, +) -> Result<(Node, Arc>>), RLPxError> { + let mut established_state = handshake::perform(state, stream).await?; log_peer_debug(&established_state.node, "Starting RLPx connection"); if let Err(reason) = post_handshake_checks(established_state.table.clone()).await { connection_failed( @@ -275,7 +278,10 @@ async fn init(state: &mut RLPxConnectionState, stream: TcpStream) -> Result<(Nod } else { // Handshake OK: handle connection // Create channels to communicate directly to the peer - let (peer_channels, sender, receiver) = PeerChannels::create(); + let (peer_channels, sender) = PeerChannels::create(handle.clone()); + + // Updating the state to establish the backend channel + established_state.backend_channel = Some(sender); // NOTE: if the peer came from the discovery server it will already be inserted in the table // but that might not always be the case, so we try to add it to the table @@ -290,17 +296,30 @@ async fn init(state: &mut RLPxConnectionState, stream: TcpStream) -> Result<(Nod established_state.inbound, ); } - // TODO Handle this unwrap - let _ = init_peer_conn(&mut established_state).await.unwrap(); - log_peer_debug(&established_state.node, "Started peer main loop"); + init_peer_conn(&mut established_state).await?; + log_peer_debug(&established_state.node, "Peer connection initialized."); // Subscribe this connection to the broadcasting channel. - let mut broadcaster_receive = if established_state.negotiated_eth_capability.is_some() { - Some(established_state.connection_broadcast_send.clone().subscribe()) + // TODO this channel is not yet connected. 
Broadcast is not working + let broadcaster_receive = if established_state.negotiated_eth_capability.is_some() { + Some( + established_state + .connection_broadcast_send + .clone() + .subscribe(), + ) } else { None }; // Send transactions transaction hashes from mempool at connection start - send_new_pooled_tx_hashes(&mut established_state).await.unwrap(); + send_new_pooled_tx_hashes(&mut established_state) + .await + .unwrap(); + + send_after( + PERIODIC_TASKS_CHECK_INTERVAL, + handle.clone(), + CastMessage::PeriodicCheck, + ); let node = established_state.clone().node; let framed = established_state.clone().framed; @@ -310,6 +329,27 @@ async fn init(state: &mut RLPxConnectionState, stream: TcpStream) -> Result<(Nod } } +async fn check_periodic_tasks( + state: &mut Established, + handle: &RLPxConnectionHandle, +) -> Result<(), RLPxError> { + send_after( + PERIODIC_TASKS_CHECK_INTERVAL, + handle.clone(), + CastMessage::PeriodicCheck, + ); + if Instant::now() >= state.next_periodic_ping { + send(state, Message::Ping(PingMessage {})).await?; + log_peer_debug(&state.node, "Ping sent"); + state.next_periodic_ping = Instant::now() + PERIODIC_PING_INTERVAL; + } + if Instant::now() >= state.next_tx_broadcast { + send_new_pooled_tx_hashes(state).await?; + state.next_tx_broadcast = Instant::now() + PERIODIC_TX_BROADCAST_INTERVAL; + } + Ok(()) +} + async fn send_new_pooled_tx_hashes(state: &mut Established) -> Result<(), RLPxError> { if SUPPORTED_ETH_CAPABILITIES .iter() @@ -423,7 +463,6 @@ async fn init_peer_conn(state: &mut Established) -> Result<(), RLPxError> { } } } - Ok(()) } @@ -443,10 +482,7 @@ async fn post_handshake_checks( Ok(()) } -async fn send_disconnect_message( - state: &mut Established, - reason: Option, -) { +async fn send_disconnect_message(state: &mut Established, reason: Option) { send(state, Message::Disconnect(DisconnectMessage { reason })) .await .unwrap_or_else(|_| { @@ -577,7 +613,9 @@ async fn exchange_hello_messages(state: &mut Established) -> Result<(), RLPxErro } async fn send(state: &mut Established, message: Message) -> Result<(), RLPxError> { - state.framed.lock().await.send(message).await + let r = state.framed.lock().await.send(message).await; + log_peer_debug(&state.node, &format!("Sent!")); + r } /// Reads from the frame until a frame is available. 
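
Taken together with the peer_handler.rs changes earlier in this patch, the request path now works like this: the backend casts a BackendMessage into the connection actor, the actor writes it to the wire, and the eventual response is pushed into backend_channel, where the caller is already waiting under a timeout. The sketch below is a hedged, self-contained tokio analogue of that shape; RequestMsg, ResponseMsg, the echoing "connection" task, and the 5-second timeout are stand-ins, not the real RLPx types or PEER_REPLY_TIMEOUT.

    use std::time::Duration;
    use tokio::{sync::mpsc, time::timeout};

    #[derive(Debug)]
    struct RequestMsg(u64);
    #[derive(Debug)]
    struct ResponseMsg(u64);

    #[tokio::main]
    async fn main() {
        // `to_conn` plays the role of the connection handle (cast side);
        // `from_conn` plays the role of PeerChannels::receiver (reply side).
        let (to_conn, mut conn_rx) = mpsc::channel::<RequestMsg>(8);
        let (reply_tx, mut from_conn) = mpsc::channel::<ResponseMsg>(8);

        // Stand-in for the connection actor: it would frame the request,
        // send it to the peer, and later push the answer to the backend.
        tokio::spawn(async move {
            while let Some(RequestMsg(id)) = conn_rx.recv().await {
                let _ = reply_tx.send(ResponseMsg(id)).await;
            }
        });

        // Backend side: cast the request, then wait for the reply with a
        // timeout, similar to the PEER_REPLY_TIMEOUT handling in peer_handler.rs.
        to_conn.send(RequestMsg(42)).await.expect("connection is alive");
        match timeout(Duration::from_secs(5), from_conn.recv()).await {
            Ok(Some(ResponseMsg(id))) => println!("got reply for request {id}"),
            Ok(None) => println!("connection closed"),
            Err(_) => println!("peer did not answer in time"),
        }
    }
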
@@ -606,7 +644,7 @@ fn spawn_listener( Some(message) => match message { Ok(message) => { log_peer_debug(&node, &format!("Received message {}", message)); - conn.call(InMessage::PeerMessage(message)).await; + let _ = conn.cast(CastMessage::PeerMessage(message)).await; return Ok(()); } Err(e) => { @@ -620,124 +658,128 @@ fn spawn_listener( }); } -async fn handle_message( - state: &mut RLPxConnectionState, - message: Message, -) -> Result<(), RLPxError> { - if let InnerState::Established(mut established_state) = state.0.clone() { - inner_handle(&mut established_state, message).await?; - state.0 = InnerState::Established(established_state); - Ok(()) - } else { - Err(RLPxError::StateError("Not established".to_string())) - } -} - -async fn inner_handle(established_state: &mut Established, message: Message) -> Result<(), RLPxError> { - let peer_supports_eth = established_state.negotiated_eth_capability.is_some(); - match message { - Message::Disconnect(msg_data) => { - log_peer_debug( - &established_state.node, - &format!("Received Disconnect: {}", msg_data.reason()), - ); - // TODO handle the disconnection request - return Err(RLPxError::DisconnectReceived(msg_data.reason())); - } - Message::Ping(_) => { - log_peer_debug(&established_state.node, "Sending pong message"); - send(established_state, Message::Pong(PongMessage {})).await?; - } - Message::Pong(_) => { - // We ignore received Pong messages - } - Message::Status(msg_data) => { - if let Some(eth) = &established_state.negotiated_eth_capability { - backend::validate_status(msg_data, &established_state.storage, eth.version).await? - }; - } - Message::GetAccountRange(req) => { - let response = process_account_range_request(req, established_state.storage.clone())?; - send(established_state, Message::AccountRange(response)).await? - } - // TODO(#1129) Add the transaction to the mempool once received. - Message::Transactions(txs) if peer_supports_eth => { - if established_state.blockchain.is_synced() { - let mut valid_txs = vec![]; - for tx in &txs.transactions { - if let Err(e) = established_state.blockchain.add_transaction_to_pool(tx.clone()).await { - log_peer_warn(&established_state.node, &format!("Error adding transaction: {}", e)); - continue; - } - valid_txs.push(tx.clone()); +async fn handle_peer_message(state: &mut Established, message: Message) -> Result<(), RLPxError> { + let peer_supports_eth = state.negotiated_eth_capability.is_some(); + match message { + Message::Disconnect(msg_data) => { + log_peer_debug( + &state.node, + &format!("Received Disconnect: {}", msg_data.reason()), + ); + // TODO handle the disconnection request + return Err(RLPxError::DisconnectReceived(msg_data.reason())); + } + Message::Ping(_) => { + log_peer_debug(&state.node, "Sending pong message"); + send(state, Message::Pong(PongMessage {})).await?; + } + Message::Pong(_) => { + // We ignore received Pong messages + } + Message::Status(msg_data) => { + if let Some(eth) = &state.negotiated_eth_capability { + backend::validate_status(msg_data, &state.storage, eth.version).await? + }; + } + Message::GetAccountRange(req) => { + let response = process_account_range_request(req, state.storage.clone())?; + send(state, Message::AccountRange(response)).await? + } + // TODO(#1129) Add the transaction to the mempool once received. 
+ Message::Transactions(txs) if peer_supports_eth => { + if state.blockchain.is_synced() { + let mut valid_txs = vec![]; + for tx in &txs.transactions { + if let Err(e) = state.blockchain.add_transaction_to_pool(tx.clone()).await { + log_peer_warn(&state.node, &format!("Error adding transaction: {}", e)); + continue; } - broadcast_message(established_state, Message::Transactions(Transactions::new(valid_txs)))?; + valid_txs.push(tx.clone()); } + broadcast_message(state, Message::Transactions(Transactions::new(valid_txs)))?; } - Message::GetBlockHeaders(msg_data) if peer_supports_eth => { - let response = BlockHeaders { - id: msg_data.id, - block_headers: msg_data.fetch_headers(&established_state.storage).await, - }; - send(established_state, Message::BlockHeaders(response)).await?; - } - Message::GetBlockBodies(msg_data) if peer_supports_eth => { - let response = BlockBodies { - id: msg_data.id, - block_bodies: msg_data.fetch_blocks(&established_state.storage).await, - }; - send(established_state, Message::BlockBodies(response)).await?; - } - Message::GetReceipts(GetReceipts { id, block_hashes }) if peer_supports_eth => { - let mut receipts = Vec::new(); - for hash in block_hashes.iter() { - receipts.push(established_state.storage.get_receipts_for_block(hash)?); - } - let response = Receipts { id, receipts }; - send(established_state, Message::Receipts(response)).await?; - } - Message::NewPooledTransactionHashes(new_pooled_transaction_hashes) if peer_supports_eth => { - //TODO(#1415): evaluate keeping track of requests to avoid sending the same twice. - let hashes = - new_pooled_transaction_hashes.get_transactions_to_request(&established_state.blockchain)?; - - //TODO(#1416): Evaluate keeping track of the request-id. - let request = GetPooledTransactions::new(random(), hashes); - send(established_state, Message::GetPooledTransactions(request)).await?; - } - Message::GetPooledTransactions(msg) => { - let response = msg.handle(&established_state.blockchain)?; - send(established_state, Message::PooledTransactions(response)).await?; - } - Message::PooledTransactions(msg) if peer_supports_eth => { - if established_state.blockchain.is_synced() { - msg.handle(&established_state.node, &established_state.blockchain).await?; - } - } - Message::GetStorageRanges(req) => { - let response = process_storage_ranges_request(req, established_state.storage.clone())?; - send(established_state, Message::StorageRanges(response)).await? - } - Message::GetByteCodes(req) => { - let response = process_byte_codes_request(req, established_state.storage.clone())?; - send(established_state, Message::ByteCodes(response)).await? + } + Message::GetBlockHeaders(msg_data) if peer_supports_eth => { + let response = BlockHeaders { + id: msg_data.id, + block_headers: msg_data.fetch_headers(&state.storage).await, + }; + send(state, Message::BlockHeaders(response)).await?; + } + Message::GetBlockBodies(msg_data) if peer_supports_eth => { + let response = BlockBodies { + id: msg_data.id, + block_bodies: msg_data.fetch_blocks(&state.storage).await, + }; + send(state, Message::BlockBodies(response)).await?; + } + Message::GetReceipts(GetReceipts { id, block_hashes }) if peer_supports_eth => { + let mut receipts = Vec::new(); + for hash in block_hashes.iter() { + receipts.push(state.storage.get_receipts_for_block(hash)?); } - Message::GetTrieNodes(req) => { - let response = process_trie_nodes_request(req, established_state.storage.clone())?; - send(established_state, Message::TrieNodes(response)).await? 
+ let response = Receipts { id, receipts }; + send(state, Message::Receipts(response)).await?; + } + Message::NewPooledTransactionHashes(new_pooled_transaction_hashes) if peer_supports_eth => { + //TODO(#1415): evaluate keeping track of requests to avoid sending the same twice. + let hashes = + new_pooled_transaction_hashes.get_transactions_to_request(&state.blockchain)?; + + //TODO(#1416): Evaluate keeping track of the request-id. + let request = GetPooledTransactions::new(random(), hashes); + send(state, Message::GetPooledTransactions(request)).await?; + } + Message::GetPooledTransactions(msg) => { + let response = msg.handle(&state.blockchain)?; + send(state, Message::PooledTransactions(response)).await?; + } + Message::PooledTransactions(msg) if peer_supports_eth => { + if state.blockchain.is_synced() { + msg.handle(&state.node, &state.blockchain).await?; } - // Send response messages to the backend - // message @ Message::AccountRange(_) - // | message @ Message::StorageRanges(_) - // | message @ Message::ByteCodes(_) - // | message @ Message::TrieNodes(_) - // | message @ Message::BlockBodies(_) - // | message @ Message::BlockHeaders(_) - // | message @ Message::Receipts(_) => sender.send(message).await?, - // TODO: Add new message types and handlers as they are implemented - message => return Err(RLPxError::MessageNotHandled(format!("{message}"))), - }; - Ok(()) + } + Message::GetStorageRanges(req) => { + let response = process_storage_ranges_request(req, state.storage.clone())?; + send(state, Message::StorageRanges(response)).await? + } + Message::GetByteCodes(req) => { + let response = process_byte_codes_request(req, state.storage.clone())?; + send(state, Message::ByteCodes(response)).await? + } + Message::GetTrieNodes(req) => { + let response = process_trie_nodes_request(req, state.storage.clone())?; + send(state, Message::TrieNodes(response)).await? + } + // Send response messages to the backend + message @ Message::AccountRange(_) + | message @ Message::StorageRanges(_) + | message @ Message::ByteCodes(_) + | message @ Message::TrieNodes(_) + | message @ Message::BlockBodies(_) + | message @ Message::BlockHeaders(_) + | message @ Message::Receipts(_) => { + state + .backend_channel + .as_mut() + // TODO: this unwrap() is temporary, until we fix the backend process to use spawned + .unwrap() + .send(message) + .await? 
+ } + // TODO: Add new message types and handlers as they are implemented + message => return Err(RLPxError::MessageNotHandled(format!("{message}"))), + }; + Ok(()) +} + +async fn handle_backend_message( + state: &mut Established, + message: Message, +) -> Result<(), RLPxError> { + log_peer_debug(&state.node, &format!("Sending message {}", message)); + send(state, message).await?; + Ok(()) } fn broadcast_message(state: &Established, msg: Message) -> Result<(), RLPxError> { From 49fc0ee766ccdb32514a22a3979e3bbd8275e774 Mon Sep 17 00:00:00 2001 From: Esteban Dimitroff Hodi Date: Fri, 13 Jun 2025 17:59:26 -0300 Subject: [PATCH 04/40] Improved periodic tasks handling --- .../p2p/rlpx/connection/handshake.rs | 10 +-- .../networking/p2p/rlpx/connection/server.rs | 65 +++++++++---------- 2 files changed, 34 insertions(+), 41 deletions(-) diff --git a/crates/networking/p2p/rlpx/connection/handshake.rs b/crates/networking/p2p/rlpx/connection/handshake.rs index 9ee01f3677..3a24288612 100644 --- a/crates/networking/p2p/rlpx/connection/handshake.rs +++ b/crates/networking/p2p/rlpx/connection/handshake.rs @@ -1,10 +1,8 @@ -use std::{collections::HashSet, sync::Arc, time::Instant}; +use std::{collections::HashSet, sync::Arc}; use crate::{ rlpx::{ - connection::server::{ - Established, InnerState, PERIODIC_PING_INTERVAL, PERIODIC_TX_BROADCAST_INTERVAL, - }, + connection::server::{Established, InnerState}, error::RLPxError, utils::{ compress_pubkey, decompress_pubkey, ecdh_xchng, kdf, log_peer_debug, sha256, @@ -83,8 +81,6 @@ pub(crate) async fn perform( capabilities: vec![], negotiated_eth_capability: None, negotiated_snap_capability: None, - next_periodic_ping: Instant::now() + PERIODIC_PING_INTERVAL, - next_tx_broadcast: Instant::now() + PERIODIC_TX_BROADCAST_INTERVAL, broadcasted_txs: HashSet::new(), client_version: context.client_version.clone(), connection_broadcast_send: context.broadcast.clone(), @@ -119,8 +115,6 @@ pub(crate) async fn perform( capabilities: vec![], negotiated_eth_capability: None, negotiated_snap_capability: None, - next_periodic_ping: Instant::now() + PERIODIC_PING_INTERVAL, - next_tx_broadcast: Instant::now() + PERIODIC_TX_BROADCAST_INTERVAL, broadcasted_txs: HashSet::new(), client_version: context.client_version.clone(), connection_broadcast_send: context.broadcast.clone(), diff --git a/crates/networking/p2p/rlpx/connection/server.rs b/crates/networking/p2p/rlpx/connection/server.rs index d0c486b53b..0768ab0694 100644 --- a/crates/networking/p2p/rlpx/connection/server.rs +++ b/crates/networking/p2p/rlpx/connection/server.rs @@ -1,4 +1,4 @@ -use std::{collections::HashSet, net::SocketAddr, sync::Arc, time::Instant}; +use std::{collections::HashSet, net::SocketAddr, sync::Arc}; use ethrex_blockchain::Blockchain; use ethrex_common::{ @@ -48,7 +48,6 @@ use crate::{ use super::{codec::RLPxCodec, handshake}; -const PERIODIC_TASKS_CHECK_INTERVAL: std::time::Duration = std::time::Duration::from_millis(500); pub(crate) const PERIODIC_PING_INTERVAL: std::time::Duration = std::time::Duration::from_secs(10); pub(crate) const PERIODIC_TX_BROADCAST_INTERVAL: std::time::Duration = std::time::Duration::from_millis(500); @@ -83,8 +82,6 @@ pub struct Established { pub(crate) capabilities: Vec, pub(crate) negotiated_eth_capability: Option, pub(crate) negotiated_snap_capability: Option, - pub(crate) next_periodic_ping: Instant, - pub(crate) next_tx_broadcast: Instant, pub(crate) broadcasted_txs: HashSet, pub(crate) client_version: String, //// Send end of the channel used to broadcast messages @@ 
-129,7 +126,8 @@ pub enum CastMessage { PeerMessage(Message), BroadcastMessage, BackendMessage(Message), - PeriodicCheck, + SendPing, + SendNewPooledTxHashes, } #[derive(Clone)] @@ -236,8 +234,24 @@ impl GenServer for RLPxConnection { Self::CastMsg::BackendMessage(message) => { let _ = handle_backend_message(&mut established_state, message).await; } - Self::CastMsg::PeriodicCheck => { - let _ = check_periodic_tasks(&mut established_state, handle).await; + Self::CastMsg::SendPing => { + let _ = send(&mut established_state, Message::Ping(PingMessage {})).await; + log_peer_debug(&established_state.node, "Ping sent"); + // TODO this should be removed when spawned_concurrency::tasks::send_interval is implemented. + send_after( + PERIODIC_PING_INTERVAL, + handle.clone(), + CastMessage::SendPing, + ); + } + Self::CastMsg::SendNewPooledTxHashes => { + let _ = send_new_pooled_tx_hashes(&mut established_state).await; + // TODO this should be removed when spawned_concurrency::tasks::send_interval is implemented. + send_after( + PERIODIC_TX_BROADCAST_INTERVAL, + handle.clone(), + CastMessage::SendNewPooledTxHashes, + ); } } // Update the state state @@ -315,10 +329,18 @@ async fn init( .await .unwrap(); + // TODO this should be replaced with spawned_concurrency::tasks::send_interval once it is properly implemented. send_after( - PERIODIC_TASKS_CHECK_INTERVAL, + PERIODIC_TX_BROADCAST_INTERVAL, handle.clone(), - CastMessage::PeriodicCheck, + CastMessage::SendNewPooledTxHashes, + ); + + // TODO this should be replaced with spawned_concurrency::tasks::send_interval once it is properly implemented. + send_after( + PERIODIC_PING_INTERVAL, + handle.clone(), + CastMessage::SendPing, ); let node = established_state.clone().node; @@ -329,27 +351,6 @@ async fn init( } } -async fn check_periodic_tasks( - state: &mut Established, - handle: &RLPxConnectionHandle, -) -> Result<(), RLPxError> { - send_after( - PERIODIC_TASKS_CHECK_INTERVAL, - handle.clone(), - CastMessage::PeriodicCheck, - ); - if Instant::now() >= state.next_periodic_ping { - send(state, Message::Ping(PingMessage {})).await?; - log_peer_debug(&state.node, "Ping sent"); - state.next_periodic_ping = Instant::now() + PERIODIC_PING_INTERVAL; - } - if Instant::now() >= state.next_tx_broadcast { - send_new_pooled_tx_hashes(state).await?; - state.next_tx_broadcast = Instant::now() + PERIODIC_TX_BROADCAST_INTERVAL; - } - Ok(()) -} - async fn send_new_pooled_tx_hashes(state: &mut Established) -> Result<(), RLPxError> { if SUPPORTED_ETH_CAPABILITIES .iter() @@ -613,9 +614,7 @@ async fn exchange_hello_messages(state: &mut Established) -> Result<(), RLPxErro } async fn send(state: &mut Established, message: Message) -> Result<(), RLPxError> { - let r = state.framed.lock().await.send(message).await; - log_peer_debug(&state.node, &format!("Sent!")); - r + state.framed.lock().await.send(message).await } /// Reads from the frame until a frame is available. 
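
The patch that follows subscribes each established connection to the shared broadcast channel and tags every broadcasted message with the id of the task that produced it, so a connection skips messages it originated itself (the `if id != tokio::task::id()` check added to handle_broadcast). Below is a minimal sketch of that filtering under simplified assumptions: plain integer connection ids stand in for tokio::task::Id, and strings stand in for RLPx Transactions messages.

    use std::sync::Arc;
    use tokio::sync::broadcast;

    #[tokio::main]
    async fn main() {
        // Shared channel; every connection gets a receiver via `subscribe()`.
        let (tx, _) = broadcast::channel::<(u64, Arc<String>)>(32);

        let mut handles = Vec::new();
        for conn_id in 0u64..3 {
            let mut rx = tx.subscribe();
            handles.push(tokio::spawn(async move {
                while let Ok((origin, msg)) = rx.recv().await {
                    // Skip messages this connection broadcasted itself,
                    // like handle_broadcast ignoring its own task id.
                    if origin != conn_id {
                        println!("conn {conn_id} forwards {msg} (from conn {origin})");
                    }
                }
            }));
        }

        // Connection 1 broadcasts a batch of transactions it just validated.
        tx.send((1, Arc::new("Transactions([tx1, tx2])".to_string())))
            .expect("subscribers exist");

        // Give the subscriber tasks a moment to drain the message in this sketch.
        tokio::time::sleep(std::time::Duration::from_millis(50)).await;
        drop(tx); // closing the channel ends the receiver loops
        for h in handles {
            let _ = h.await;
        }
    }
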
From 62956481547cbc241d0156476a1b0f9c9e16d4c5 Mon Sep 17 00:00:00 2001 From: Esteban Dimitroff Hodi Date: Tue, 17 Jun 2025 17:56:51 -0300 Subject: [PATCH 05/40] Added broadcast channel handling --- .../networking/p2p/rlpx/connection/server.rs | 191 ++++++++---------- crates/networking/p2p/rlpx/utils.rs | 4 +- 2 files changed, 88 insertions(+), 107 deletions(-) diff --git a/crates/networking/p2p/rlpx/connection/server.rs b/crates/networking/p2p/rlpx/connection/server.rs index 0768ab0694..0d1d1f706d 100644 --- a/crates/networking/p2p/rlpx/connection/server.rs +++ b/crates/networking/p2p/rlpx/connection/server.rs @@ -15,10 +15,11 @@ use spawned_concurrency::tasks::{ use tokio::{ net::{TcpSocket, TcpStream}, sync::{broadcast, mpsc::Sender, Mutex}, + task, }; use tokio_stream::StreamExt; use tokio_util::codec::Framed; -use tracing::{debug, error, info}; +use tracing::{debug, error}; use crate::{ discv4::server::MAX_PEERS_TCP_CONNECTIONS, @@ -48,9 +49,8 @@ use crate::{ use super::{codec::RLPxCodec, handshake}; -pub(crate) const PERIODIC_PING_INTERVAL: std::time::Duration = std::time::Duration::from_secs(10); -pub(crate) const PERIODIC_TX_BROADCAST_INTERVAL: std::time::Duration = - std::time::Duration::from_millis(500); +pub(crate) const PING_INTERVAL: std::time::Duration = std::time::Duration::from_secs(10); +pub(crate) const TX_BROADCAST_INTERVAL: std::time::Duration = std::time::Duration::from_millis(500); pub(crate) type RLPxConnBroadcastSender = broadcast::Sender<(tokio::task::Id, Arc)>; @@ -124,10 +124,10 @@ pub enum CallMessage { pub enum CastMessage { PeerMessage(Message), - BroadcastMessage, BackendMessage(Message), SendPing, SendNewPooledTxHashes, + BroadcastMessage(task::Id, Arc), } #[derive(Clone)] @@ -145,25 +145,16 @@ pub struct RLPxConnection {} impl RLPxConnection { pub async fn spawn_as_receiver(context: P2PContext, peer_addr: SocketAddr, stream: TcpStream) { - info!("spawn_as_receiver"); let state = RLPxConnectionState::new_as_receiver(context, peer_addr); - info!("r new state"); let mut conn = RLPxConnection::start(state); - info!("r connected"); match conn.call(CallMessage::Init(stream)).await { - Ok(Ok(OutMessage::InitResponse { node, framed })) => { - info!("r listener"); - spawn_listener(conn, node, framed); - info!("r done"); - } - Ok(Ok(_)) => error!("Unexpected response from connection"), + Ok(Ok(_)) => {} Ok(Err(error)) => error!("Error starting RLPxConnection: {:?}", error), Err(error) => error!("Unhandled error starting RLPxConnection: {:?}", error), } } pub async fn spawn_as_initiator(context: P2PContext, node: &Node) { - info!("spawn_as_initiator"); let addr = SocketAddr::new(node.ip, node.tcp_port); let stream = match tcp_stream(addr).await { Ok(result) => result, @@ -173,18 +164,10 @@ impl RLPxConnection { return; } }; - info!("i stream"); let state = RLPxConnectionState::new_as_initiator(context, node); - info!("i new state"); let mut conn = RLPxConnection::start(state.clone()); - info!("i connected"); match conn.call(CallMessage::Init(stream)).await { - Ok(Ok(OutMessage::InitResponse { node, framed })) => { - info!("i listener"); - spawn_listener(conn, node, framed); - info!("i done"); - } - Ok(Ok(_)) => error!("Unexpected response from connection"), + Ok(Ok(_)) => {} Ok(Err(error)) => error!("Error starting RLPxConnection: {:?}", error), Err(error) => error!("Unhandled error starting RLPxConnection: {:?}", error), } @@ -210,9 +193,7 @@ impl GenServer for RLPxConnection { ) -> CallResponse { match message { Self::CallMsg::Init(stream) => match init(state, 
handle, stream).await { - Ok((node, framed)) => { - CallResponse::Reply(Ok(OutMessage::InitResponse { node, framed })) - } + Ok(()) => CallResponse::Reply(Ok(OutMessage::Done)), Err(e) => CallResponse::Reply(Err(e)), }, } @@ -228,33 +209,44 @@ impl GenServer for RLPxConnection { match message { // TODO: handle all these "let _" Self::CastMsg::PeerMessage(message) => { + log_peer_debug( + &established_state.node, + &format!("Received peer message: {message}"), + ); let _ = handle_peer_message(&mut established_state, message).await; } - Self::CastMsg::BroadcastMessage => todo!(), Self::CastMsg::BackendMessage(message) => { + log_peer_debug( + &established_state.node, + &format!("Received backend message: {message}"), + ); let _ = handle_backend_message(&mut established_state, message).await; } Self::CastMsg::SendPing => { let _ = send(&mut established_state, Message::Ping(PingMessage {})).await; log_peer_debug(&established_state.node, "Ping sent"); // TODO this should be removed when spawned_concurrency::tasks::send_interval is implemented. - send_after( - PERIODIC_PING_INTERVAL, - handle.clone(), - CastMessage::SendPing, - ); + send_after(PING_INTERVAL, handle.clone(), CastMessage::SendPing); } Self::CastMsg::SendNewPooledTxHashes => { let _ = send_new_pooled_tx_hashes(&mut established_state).await; + log_peer_debug(&established_state.node, "SendNewPooledTxHashes sent"); // TODO this should be removed when spawned_concurrency::tasks::send_interval is implemented. send_after( - PERIODIC_TX_BROADCAST_INTERVAL, + TX_BROADCAST_INTERVAL, handle.clone(), CastMessage::SendNewPooledTxHashes, ); } + Self::CastMsg::BroadcastMessage(id, msg) => { + log_peer_debug( + &established_state.node, + &format!("Received broadcasted message: {msg}"), + ); + handle_broadcast(&mut established_state, (id, msg)); + } } - // Update the state state + // Update the state state.0 = InnerState::Established(established_state); CastResponse::NoReply } else { @@ -273,7 +265,7 @@ async fn init( state: &mut RLPxConnectionState, handle: &RLPxConnectionHandle, stream: TcpStream, -) -> Result<(Node, Arc>>), RLPxError> { +) -> Result<(), RLPxError> { let mut established_state = handshake::perform(state, stream).await?; log_peer_debug(&established_state.node, "Starting RLPx connection"); if let Err(reason) = post_handshake_checks(established_state.table.clone()).await { @@ -312,42 +304,31 @@ async fn init( } init_peer_conn(&mut established_state).await?; log_peer_debug(&established_state.node, "Peer connection initialized."); - // Subscribe this connection to the broadcasting channel. - // TODO this channel is not yet connected. Broadcast is not working - let broadcaster_receive = if established_state.negotiated_eth_capability.is_some() { - Some( - established_state - .connection_broadcast_send - .clone() - .subscribe(), - ) - } else { - None - }; + // Send transactions transaction hashes from mempool at connection start - send_new_pooled_tx_hashes(&mut established_state) - .await - .unwrap(); + send_new_pooled_tx_hashes(&mut established_state).await?; // TODO this should be replaced with spawned_concurrency::tasks::send_interval once it is properly implemented. send_after( - PERIODIC_TX_BROADCAST_INTERVAL, + TX_BROADCAST_INTERVAL, handle.clone(), CastMessage::SendNewPooledTxHashes, ); // TODO this should be replaced with spawned_concurrency::tasks::send_interval once it is properly implemented. 
- send_after( - PERIODIC_PING_INTERVAL, + send_after(PING_INTERVAL, handle.clone(), CastMessage::SendPing); + + spawn_listener( handle.clone(), - CastMessage::SendPing, + &established_state.node, + &established_state.framed, ); - let node = established_state.clone().node; - let framed = established_state.clone().framed; + spawn_broadcast_listener(handle.clone(), &mut established_state); + // New state state.0 = InnerState::Established(established_state); - Ok((node, framed)) + Ok(()) } } @@ -388,51 +369,6 @@ async fn send_new_pooled_tx_hashes(state: &mut Established) -> Result<(), RLPxEr Ok(()) } -// async fn connection_loop( -// &mut self, -// sender: tokio::sync::mpsc::Sender, -// mut receiver: tokio::sync::mpsc::Receiver, -// ) -> Result<(), RLPxError> { - -// // Start listening for messages, -// loop { -// tokio::select! { -// // Expect a message from the remote peer -// Some(message) = self.receive() => { -// match message { -// Ok(message) => { -// log_peer_debug(&self.node, &format!("Received message {}", message)); -// self.handle_message(message, sender.clone()).await?; -// }, -// Err(e) => { -// log_peer_debug(&self.node, &format!("Received RLPX Error in msg {}", e)); -// return Err(e); -// } -// } -// } -// // Expect a message from the backend -// Some(message) = receiver.recv() => { -// log_peer_debug(&self.node, &format!("Sending message {}", message)); -// self.send(message).await?; -// } -// // This is not ideal, but using the receiver without -// // this function call, causes the loop to take ownwership -// // of the variable and the compiler will complain about it, -// // with this function, we avoid that. -// // If the broadcaster is Some (i.e. we're connected to a peer that supports an eth protocol), -// // we'll receive broadcasted messages from another connections through a channel, otherwise -// // the function below will yield immediately but the select will not match and -// // ignore the returned value. -// Some(broadcasted_msg) = Self::maybe_wait_for_broadcaster(&mut broadcaster_receive) => { -// self.handle_broadcast(broadcasted_msg?).await? -// } -// // Allow an interruption to check periodic tasks -// _ = sleep(PERIODIC_TASKS_CHECK_INTERVAL) => (), // noop -// } -// self.check_periodic_tasks().await?; -// } -// } - async fn init_peer_conn(state: &mut Established) -> Result<(), RLPxError> { // Sending eth Status if peer supports it if let Some(eth) = state.negotiated_eth_capability.clone() { @@ -634,9 +570,11 @@ async fn receive(state: &mut Established) -> Option> fn spawn_listener( mut conn: RLPxConnectionHandle, - node: Node, - framed: Arc>>, + node: &Node, + framed: &Arc>>, ) { + let node = node.clone(); + let framed = framed.clone(); spawned_rt::tasks::spawn(async move { loop { match framed.lock().await.next().await { @@ -657,6 +595,25 @@ fn spawn_listener( }); } +fn spawn_broadcast_listener(mut handle: RLPxConnectionHandle, state: &mut Established) { + // Subscribe this connection to the broadcasting channel. 
+ // TODO currently spawning a listener task that will suscribe to a broadcast channel and + // create RLPxConnection Broadcast messages to send the Genserver + // We have to improve this mechanism to avoid manual creation of channels and subscriptions + // (That is, we should have a spawned-based broadcaster or maybe the backend should handle the + // transactions propagation) + if state.negotiated_eth_capability.is_some() { + let mut receiver = state.connection_broadcast_send.subscribe(); + spawned_rt::tasks::spawn(async move { + loop { + if let Ok((id, msg)) = receiver.recv().await { + let _ = handle.cast(CastMessage::BroadcastMessage(id, msg)).await; + }; + } + }); + }; +} + async fn handle_peer_message(state: &mut Established, message: Message) -> Result<(), RLPxError> { let peer_supports_eth = state.negotiated_eth_capability.is_some(); match message { @@ -781,6 +738,30 @@ async fn handle_backend_message( Ok(()) } +async fn handle_broadcast( + state: &mut Established, + (id, broadcasted_msg): (task::Id, Arc), +) -> Result<(), RLPxError> { + if id != tokio::task::id() { + match broadcasted_msg.as_ref() { + Message::Transactions(ref txs) => { + // TODO(#1131): Avoid cloning this vector. + let cloned = txs.transactions.clone(); + let new_msg = Message::Transactions(Transactions { + transactions: cloned, + }); + send(state, new_msg).await?; + } + msg => { + let error_message = format!("Non-supported message broadcasted: {msg}"); + log_peer_error(&state.node, &error_message); + return Err(RLPxError::BroadcastError(error_message)); + } + } + } + Ok(()) +} + fn broadcast_message(state: &Established, msg: Message) -> Result<(), RLPxError> { match msg { txs_msg @ Message::Transactions(_) => { diff --git a/crates/networking/p2p/rlpx/utils.rs b/crates/networking/p2p/rlpx/utils.rs index 2924fd144c..f02afa3ffa 100644 --- a/crates/networking/p2p/rlpx/utils.rs +++ b/crates/networking/p2p/rlpx/utils.rs @@ -7,7 +7,7 @@ use k256::{ }; use sha3::{Digest, Keccak256}; use snap::raw::{max_compress_len, Decoder as SnappyDecoder, Encoder as SnappyEncoder}; -use tracing::{debug, error, info, warn}; +use tracing::{debug, error, warn}; pub fn sha256(data: &[u8]) -> [u8; 32] { use k256::sha2::Digest; @@ -73,7 +73,7 @@ pub fn snappy_decompress(msg_data: &[u8]) -> Result, RLPDecodeError> { } pub(crate) fn log_peer_debug(node: &Node, text: &str) { - info!("[{0}]: {1}", node, text) + debug!("[{0}]: {1}", node, text) } pub(crate) fn log_peer_error(node: &Node, text: &str) { From b5585cf863c4f99e850dcd62d0286cb7864927a8 Mon Sep 17 00:00:00 2001 From: Esteban Dimitroff Hodi Date: Wed, 18 Jun 2025 15:48:17 -0300 Subject: [PATCH 06/40] Simplified RLPxConnection initialization --- Cargo.lock | 4 +- Cargo.toml | 4 +- .../p2p/rlpx/connection/handshake.rs | 46 ++-- .../networking/p2p/rlpx/connection/server.rs | 205 +++++++++--------- 4 files changed, 133 insertions(+), 126 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cf57f6abbc..307fbf833d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9535,7 +9535,7 @@ dependencies = [ [[package]] name = "spawned-concurrency" version = "0.1.0" -source = "git+https://github.com/lambdaclass/spawned.git?rev=0d1c4bbb7fd771c407e9506b4f4dd8374b6ae0bf#0d1c4bbb7fd771c407e9506b4f4dd8374b6ae0bf" +source = "git+https://github.com/lambdaclass/spawned.git?rev=3210230fc81a67cf73210e3037ffaca209d289a0#3210230fc81a67cf73210e3037ffaca209d289a0" dependencies = [ "futures", "spawned-rt", @@ -9545,7 +9545,7 @@ dependencies = [ [[package]] name = "spawned-rt" version = "0.1.0" -source = 
"git+https://github.com/lambdaclass/spawned.git?rev=0d1c4bbb7fd771c407e9506b4f4dd8374b6ae0bf#0d1c4bbb7fd771c407e9506b4f4dd8374b6ae0bf" +source = "git+https://github.com/lambdaclass/spawned.git?rev=3210230fc81a67cf73210e3037ffaca209d289a0#3210230fc81a67cf73210e3037ffaca209d289a0" dependencies = [ "crossbeam 0.7.3", "tokio", diff --git a/Cargo.toml b/Cargo.toml index fe6c13b21b..87f08d45ec 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -88,5 +88,5 @@ libsecp256k1 = "0.7.2" clap = { version = "4.3", features = ["derive", "env"] } clap_complete = "4.5.17" eyre = "0.6.12" -spawned-concurrency = {git = "https://github.com/lambdaclass/spawned.git", rev = "0d1c4bbb7fd771c407e9506b4f4dd8374b6ae0bf"} -spawned-rt = {git = "https://github.com/lambdaclass/spawned.git", rev = "0d1c4bbb7fd771c407e9506b4f4dd8374b6ae0bf"} +spawned-concurrency = {git = "https://github.com/lambdaclass/spawned.git", rev = "3210230fc81a67cf73210e3037ffaca209d289a0"} +spawned-rt = {git = "https://github.com/lambdaclass/spawned.git", rev = "3210230fc81a67cf73210e3037ffaca209d289a0"} diff --git a/crates/networking/p2p/rlpx/connection/handshake.rs b/crates/networking/p2p/rlpx/connection/handshake.rs index 3a24288612..243b46668b 100644 --- a/crates/networking/p2p/rlpx/connection/handshake.rs +++ b/crates/networking/p2p/rlpx/connection/handshake.rs @@ -28,13 +28,15 @@ use rand::Rng; use sha3::{Digest, Keccak256}; use tokio::{ io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}, - net::TcpStream, sync::Mutex, }; use tokio_util::codec::Framed; use tracing::info; -use super::{codec::RLPxCodec, server::RLPxConnectionState}; +use super::{ + codec::RLPxCodec, + server::{Initiator, Receiver}, +}; type Aes128Ctr64BE = ctr::Ctr64BE; @@ -54,28 +56,30 @@ pub(crate) struct LocalState { pub(crate) init_message: Vec, } -pub(crate) async fn perform( - state: &mut RLPxConnectionState, - mut stream: TcpStream, -) -> Result { - match &state.0 { - InnerState::Initiator(initiator) => { +pub(crate) async fn perform(state: InnerState) -> Result { + match state { + InnerState::Initiator(Initiator { + context, + node, + stream, + }) => { info!("Starting handshake as initiator!"); - let context = &initiator.context; - let local_state = - send_auth(&context.signer, initiator.node.public_key, &mut stream).await?; - let remote_state = - receive_ack(&context.signer, initiator.node.public_key, &mut stream).await?; + let mut stream = match Arc::try_unwrap(stream) { + Ok(s) => s, + Err(_) => return Err(RLPxError::StateError("Cannot use the stream".to_string())), + }; + let local_state = send_auth(&context.signer, node.public_key, &mut stream).await?; + let remote_state = receive_ack(&context.signer, node.public_key, &mut stream).await?; // Local node is initator // keccak256(nonce || initiator-nonce) let hashed_nonces: [u8; 32] = Keccak256::digest([remote_state.nonce.0, local_state.nonce.0].concat()).into(); let codec = RLPxCodec::new(&local_state, &remote_state, hashed_nonces); - log_peer_debug(&initiator.node, "Completed handshake as initiator!"); + log_peer_debug(&node, "Completed handshake as initiator!"); Ok(Established { signer: context.signer.clone(), framed: Arc::new(Mutex::new(Framed::new(stream, codec))), - node: initiator.node.clone(), + node: node.clone(), storage: context.storage.clone(), blockchain: context.blockchain.clone(), capabilities: vec![], @@ -89,9 +93,16 @@ pub(crate) async fn perform( inbound: false, }) } - InnerState::Receiver(receiver) => { + InnerState::Receiver(Receiver { + context, + peer_addr, + stream, + }) => { info!("Starting 
handshake as receiver!"); - let context = &receiver.context; + let mut stream = match Arc::try_unwrap(stream) { + Ok(s) => s, + Err(_) => return Err(RLPxError::StateError("Cannot use the stream".to_string())), + }; let remote_state = receive_auth(&context.signer, &mut stream).await?; let local_state = send_ack(remote_state.public_key, &mut stream).await?; // Remote node is initiator @@ -99,7 +110,6 @@ pub(crate) async fn perform( let hashed_nonces: [u8; 32] = Keccak256::digest([local_state.nonce.0, remote_state.nonce.0].concat()).into(); let codec = RLPxCodec::new(&local_state, &remote_state, hashed_nonces); - let peer_addr = receiver.peer_addr; let node = Node::new( peer_addr.ip(), peer_addr.port(), diff --git a/crates/networking/p2p/rlpx/connection/server.rs b/crates/networking/p2p/rlpx/connection/server.rs index 0d1d1f706d..f3a01f7b89 100644 --- a/crates/networking/p2p/rlpx/connection/server.rs +++ b/crates/networking/p2p/rlpx/connection/server.rs @@ -64,12 +64,14 @@ pub struct RLPxConnectionState(pub InnerState); pub struct Initiator { pub(crate) context: P2PContext, pub(crate) node: Node, + pub(crate) stream: Arc, } #[derive(Clone)] pub struct Receiver { pub(crate) context: P2PContext, pub(crate) peer_addr: SocketAddr, + pub(crate) stream: Arc, } #[derive(Clone)] @@ -106,21 +108,24 @@ pub enum InnerState { } impl RLPxConnectionState { - pub fn new_as_receiver(context: P2PContext, peer_addr: SocketAddr) -> Self { - Self(InnerState::Receiver(Receiver { context, peer_addr })) + pub fn new_as_receiver(context: P2PContext, peer_addr: SocketAddr, stream: TcpStream) -> Self { + Self(InnerState::Receiver(Receiver { + context, + peer_addr, + stream: Arc::new(stream), + })) } - pub fn new_as_initiator(context: P2PContext, node: &Node) -> Self { + pub fn new_as_initiator(context: P2PContext, node: &Node, stream: TcpStream) -> Self { Self(InnerState::Initiator(Initiator { context, node: node.clone(), + stream: Arc::new(stream), })) } } -pub enum CallMessage { - Init(TcpStream), -} +pub enum CallMessage {} pub enum CastMessage { PeerMessage(Message), @@ -144,33 +149,30 @@ pub enum OutMessage { pub struct RLPxConnection {} impl RLPxConnection { - pub async fn spawn_as_receiver(context: P2PContext, peer_addr: SocketAddr, stream: TcpStream) { - let state = RLPxConnectionState::new_as_receiver(context, peer_addr); - let mut conn = RLPxConnection::start(state); - match conn.call(CallMessage::Init(stream)).await { - Ok(Ok(_)) => {} - Ok(Err(error)) => error!("Error starting RLPxConnection: {:?}", error), - Err(error) => error!("Unhandled error starting RLPxConnection: {:?}", error), - } + pub async fn spawn_as_receiver( + context: P2PContext, + peer_addr: SocketAddr, + stream: TcpStream, + ) -> Result { + let state = RLPxConnectionState::new_as_receiver(context, peer_addr, stream); + Ok(RLPxConnection::start(state)) } - pub async fn spawn_as_initiator(context: P2PContext, node: &Node) { + pub async fn spawn_as_initiator( + context: P2PContext, + node: &Node, + ) -> Result { let addr = SocketAddr::new(node.ip, node.tcp_port); let stream = match tcp_stream(addr).await { Ok(result) => result, Err(error) => { log_peer_debug(node, &format!("Error creating tcp connection {error}")); context.table.lock().await.replace_peer(node.node_id()); - return; + return Err(error); } }; - let state = RLPxConnectionState::new_as_initiator(context, node); - let mut conn = RLPxConnection::start(state.clone()); - match conn.call(CallMessage::Init(stream)).await { - Ok(Ok(_)) => {} - Ok(Err(error)) => error!("Error starting 
RLPxConnection: {:?}", error), - Err(error) => error!("Unhandled error starting RLPxConnection: {:?}", error), - } + let state = RLPxConnectionState::new_as_initiator(context, node, stream); + Ok(RLPxConnection::start(state.clone())) } } @@ -185,18 +187,84 @@ impl GenServer for RLPxConnection { Self {} } + async fn init( + &mut self, + handle: &GenServerHandle, + mut state: Self::State, + ) -> Result { + let mut established_state = handshake::perform(state.0).await?; + log_peer_debug(&established_state.node, "Starting RLPx connection"); + if let Err(reason) = post_handshake_checks(established_state.table.clone()).await { + connection_failed( + &mut established_state, + "Post handshake validations failed", + RLPxError::DisconnectSent(reason), + ) + .await; + return Err(RLPxError::Disconnected()); + } + + if let Err(e) = exchange_hello_messages(&mut established_state).await { + connection_failed(&mut established_state, "Hello messages exchange failed", e).await; + return Err(RLPxError::Disconnected()); + } else { + // Handshake OK: handle connection + // Create channels to communicate directly to the peer + let (peer_channels, sender) = PeerChannels::create(handle.clone()); + + // Updating the state to establish the backend channel + established_state.backend_channel = Some(sender); + + // NOTE: if the peer came from the discovery server it will already be inserted in the table + // but that might not always be the case, so we try to add it to the table + // Note: we don't ping the node we let the validation service do its job + { + let mut table_lock = established_state.table.lock().await; + table_lock.insert_node_forced(established_state.node.clone()); + table_lock.init_backend_communication( + established_state.node.node_id(), + peer_channels, + established_state.capabilities.clone(), + established_state.inbound, + ); + } + init_peer_conn(&mut established_state).await?; + log_peer_debug(&established_state.node, "Peer connection initialized."); + + // Send transactions transaction hashes from mempool at connection start + send_new_pooled_tx_hashes(&mut established_state).await?; + + // TODO this should be replaced with spawned_concurrency::tasks::send_interval once it is properly implemented. + send_after( + TX_BROADCAST_INTERVAL, + handle.clone(), + CastMessage::SendNewPooledTxHashes, + ); + + // TODO this should be replaced with spawned_concurrency::tasks::send_interval once it is properly implemented. 
+ send_after(PING_INTERVAL, handle.clone(), CastMessage::SendPing); + + spawn_listener( + handle.clone(), + &established_state.node, + &established_state.framed, + ); + + spawn_broadcast_listener(handle.clone(), &mut established_state); + + // New state + state.0 = InnerState::Established(established_state); + Ok(state) + } + } + async fn handle_call( &mut self, - message: Self::CallMsg, - handle: &RLPxConnectionHandle, - state: &mut Self::State, + _message: Self::CallMsg, + _handle: &RLPxConnectionHandle, + _state: &mut Self::State, ) -> CallResponse { - match message { - Self::CallMsg::Init(stream) => match init(state, handle, stream).await { - Ok(()) => CallResponse::Reply(Ok(OutMessage::Done)), - Err(e) => CallResponse::Reply(Err(e)), - }, - } + CallResponse::Reply(Ok(OutMessage::Done)) } async fn handle_cast( @@ -243,7 +311,7 @@ impl GenServer for RLPxConnection { &established_state.node, &format!("Received broadcasted message: {msg}"), ); - handle_broadcast(&mut established_state, (id, msg)); + let _ = handle_broadcast(&mut established_state, (id, msg)).await; } } // Update the state @@ -261,77 +329,6 @@ async fn tcp_stream(addr: SocketAddr) -> Result { TcpSocket::new_v4()?.connect(addr).await } -async fn init( - state: &mut RLPxConnectionState, - handle: &RLPxConnectionHandle, - stream: TcpStream, -) -> Result<(), RLPxError> { - let mut established_state = handshake::perform(state, stream).await?; - log_peer_debug(&established_state.node, "Starting RLPx connection"); - if let Err(reason) = post_handshake_checks(established_state.table.clone()).await { - connection_failed( - &mut established_state, - "Post handshake validations failed", - RLPxError::DisconnectSent(reason), - ) - .await; - return Err(RLPxError::Disconnected()); - } - - if let Err(e) = exchange_hello_messages(&mut established_state).await { - connection_failed(&mut established_state, "Hello messages exchange failed", e).await; - return Err(RLPxError::Disconnected()); - } else { - // Handshake OK: handle connection - // Create channels to communicate directly to the peer - let (peer_channels, sender) = PeerChannels::create(handle.clone()); - - // Updating the state to establish the backend channel - established_state.backend_channel = Some(sender); - - // NOTE: if the peer came from the discovery server it will already be inserted in the table - // but that might not always be the case, so we try to add it to the table - // Note: we don't ping the node we let the validation service do its job - { - let mut table_lock = established_state.table.lock().await; - table_lock.insert_node_forced(established_state.node.clone()); - table_lock.init_backend_communication( - established_state.node.node_id(), - peer_channels, - established_state.capabilities.clone(), - established_state.inbound, - ); - } - init_peer_conn(&mut established_state).await?; - log_peer_debug(&established_state.node, "Peer connection initialized."); - - // Send transactions transaction hashes from mempool at connection start - send_new_pooled_tx_hashes(&mut established_state).await?; - - // TODO this should be replaced with spawned_concurrency::tasks::send_interval once it is properly implemented. - send_after( - TX_BROADCAST_INTERVAL, - handle.clone(), - CastMessage::SendNewPooledTxHashes, - ); - - // TODO this should be replaced with spawned_concurrency::tasks::send_interval once it is properly implemented. 
- send_after(PING_INTERVAL, handle.clone(), CastMessage::SendPing); - - spawn_listener( - handle.clone(), - &established_state.node, - &established_state.framed, - ); - - spawn_broadcast_listener(handle.clone(), &mut established_state); - - // New state - state.0 = InnerState::Established(established_state); - Ok(()) - } -} - async fn send_new_pooled_tx_hashes(state: &mut Established) -> Result<(), RLPxError> { if SUPPORTED_ETH_CAPABILITIES .iter() From a35930f2a676fdbff5bd49927786c7484f595bc6 Mon Sep 17 00:00:00 2001 From: Esteban Dimitroff Hodi Date: Thu, 19 Jun 2025 20:28:09 -0300 Subject: [PATCH 07/40] Removed Arc>> for listening stream --- Cargo.lock | 4 +- Cargo.toml | 4 +- crates/l2/sequencer/l1_watcher.rs | 14 ++-- .../p2p/rlpx/connection/handshake.rs | 65 ++++++++-------- .../networking/p2p/rlpx/connection/server.rs | 76 ++++++++++--------- 5 files changed, 80 insertions(+), 83 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 307fbf833d..e371f1ae9b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9535,7 +9535,7 @@ dependencies = [ [[package]] name = "spawned-concurrency" version = "0.1.0" -source = "git+https://github.com/lambdaclass/spawned.git?rev=3210230fc81a67cf73210e3037ffaca209d289a0#3210230fc81a67cf73210e3037ffaca209d289a0" +source = "git+https://github.com/lambdaclass/spawned.git?rev=28d2db5ba7dc98e7ec03957691684113d287e05f#28d2db5ba7dc98e7ec03957691684113d287e05f" dependencies = [ "futures", "spawned-rt", @@ -9545,7 +9545,7 @@ dependencies = [ [[package]] name = "spawned-rt" version = "0.1.0" -source = "git+https://github.com/lambdaclass/spawned.git?rev=3210230fc81a67cf73210e3037ffaca209d289a0#3210230fc81a67cf73210e3037ffaca209d289a0" +source = "git+https://github.com/lambdaclass/spawned.git?rev=28d2db5ba7dc98e7ec03957691684113d287e05f#28d2db5ba7dc98e7ec03957691684113d287e05f" dependencies = [ "crossbeam 0.7.3", "tokio", diff --git a/Cargo.toml b/Cargo.toml index 87f08d45ec..2a63fae909 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -88,5 +88,5 @@ libsecp256k1 = "0.7.2" clap = { version = "4.3", features = ["derive", "env"] } clap_complete = "4.5.17" eyre = "0.6.12" -spawned-concurrency = {git = "https://github.com/lambdaclass/spawned.git", rev = "3210230fc81a67cf73210e3037ffaca209d289a0"} -spawned-rt = {git = "https://github.com/lambdaclass/spawned.git", rev = "3210230fc81a67cf73210e3037ffaca209d289a0"} +spawned-concurrency = {git = "https://github.com/lambdaclass/spawned.git", rev = "28d2db5ba7dc98e7ec03957691684113d287e05f"} +spawned-rt = {git = "https://github.com/lambdaclass/spawned.git", rev = "28d2db5ba7dc98e7ec03957691684113d287e05f"} diff --git a/crates/l2/sequencer/l1_watcher.rs b/crates/l2/sequencer/l1_watcher.rs index 977168a62f..83205f4785 100644 --- a/crates/l2/sequencer/l1_watcher.rs +++ b/crates/l2/sequencer/l1_watcher.rs @@ -100,26 +100,26 @@ impl GenServer for L1Watcher { &mut self, _message: Self::CallMsg, _handle: &GenServerHandle, - _state: &mut Self::State, + state: Self::State, ) -> CallResponse { - CallResponse::Reply(OutMessage::Done) + CallResponse::Reply(state, OutMessage::Done) } async fn handle_cast( &mut self, message: Self::CastMsg, handle: &GenServerHandle, - state: &mut Self::State, - ) -> CastResponse { + state: Self::State, + ) -> CastResponse { match message { Self::InMsg::Watch => { let check_interval = random_duration(state.check_interval); send_after(check_interval, handle.clone(), Self::InMsg::Watch); - match get_logs(state).await { + match get_logs(&mut state).await { Ok(logs) => { // We may not have a deposit nor a withdrawal, that 
means no events -> no logs. if !logs.is_empty() { - if let Err(err) = process_logs(state, logs).await { + if let Err(err) = process_logs(&state, logs).await { error!("L1 Watcher Error: {}", err) }; }; @@ -127,7 +127,7 @@ impl GenServer for L1Watcher { Err(err) => error!("L1 Watcher Error: {}", err), }; - CastResponse::NoReply + CastResponse::NoReply(state) } } } diff --git a/crates/networking/p2p/rlpx/connection/handshake.rs b/crates/networking/p2p/rlpx/connection/handshake.rs index 243b46668b..e535e896c4 100644 --- a/crates/networking/p2p/rlpx/connection/handshake.rs +++ b/crates/networking/p2p/rlpx/connection/handshake.rs @@ -19,6 +19,7 @@ use ethrex_rlp::{ error::RLPDecodeError, structs::{Decoder, Encoder}, }; +use futures::{stream::SplitStream, StreamExt}; use k256::{ ecdsa::{self, RecoveryId, SigningKey, VerifyingKey}, elliptic_curve::sec1::ToEncodedPoint, @@ -28,6 +29,7 @@ use rand::Rng; use sha3::{Digest, Keccak256}; use tokio::{ io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}, + net::TcpStream, sync::Mutex, }; use tokio_util::codec::Framed; @@ -56,8 +58,10 @@ pub(crate) struct LocalState { pub(crate) init_message: Vec, } -pub(crate) async fn perform(state: InnerState) -> Result { - match state { +pub(crate) async fn perform( + state: InnerState, +) -> Result<(Established, SplitStream>), RLPxError> { + let (context, node, framed, inbound) = match state { InnerState::Initiator(Initiator { context, node, @@ -76,22 +80,7 @@ pub(crate) async fn perform(state: InnerState) -> Result Keccak256::digest([remote_state.nonce.0, local_state.nonce.0].concat()).into(); let codec = RLPxCodec::new(&local_state, &remote_state, hashed_nonces); log_peer_debug(&node, "Completed handshake as initiator!"); - Ok(Established { - signer: context.signer.clone(), - framed: Arc::new(Mutex::new(Framed::new(stream, codec))), - node: node.clone(), - storage: context.storage.clone(), - blockchain: context.blockchain.clone(), - capabilities: vec![], - negotiated_eth_capability: None, - negotiated_snap_capability: None, - broadcasted_txs: HashSet::new(), - client_version: context.client_version.clone(), - connection_broadcast_send: context.broadcast.clone(), - table: context.table.clone(), - backend_channel: None, - inbound: false, - }) + (context, node, Framed::new(stream, codec), false) } InnerState::Receiver(Receiver { context, @@ -116,27 +105,33 @@ pub(crate) async fn perform(state: InnerState) -> Result peer_addr.port(), remote_state.public_key, ); - Ok(Established { - signer: context.signer.clone(), - framed: Arc::new(Mutex::new(Framed::new(stream, codec))), - node: node, - storage: context.storage.clone(), - blockchain: context.blockchain.clone(), - capabilities: vec![], - negotiated_eth_capability: None, - negotiated_snap_capability: None, - broadcasted_txs: HashSet::new(), - client_version: context.client_version.clone(), - connection_broadcast_send: context.broadcast.clone(), - table: context.table.clone(), - backend_channel: None, - inbound: true, - }) + log_peer_debug(&node, "Completed handshake as receiver!"); + (context, node, Framed::new(stream, codec), true) } InnerState::Established(_) => { return Err(RLPxError::StateError("Already established".to_string())) } - } + }; + let (sink, stream) = framed.split(); + Ok(( + Established { + signer: context.signer.clone(), + sink: Arc::new(Mutex::new(sink)), + node: node.clone(), + storage: context.storage.clone(), + blockchain: context.blockchain.clone(), + capabilities: vec![], + negotiated_eth_capability: None, + negotiated_snap_capability: None, 
+ broadcasted_txs: HashSet::new(), + client_version: context.client_version.clone(), + connection_broadcast_send: context.broadcast.clone(), + table: context.table.clone(), + backend_channel: None, + inbound, + }, + stream, + )) } async fn send_auth( diff --git a/crates/networking/p2p/rlpx/connection/server.rs b/crates/networking/p2p/rlpx/connection/server.rs index f3a01f7b89..cc0c1def0f 100644 --- a/crates/networking/p2p/rlpx/connection/server.rs +++ b/crates/networking/p2p/rlpx/connection/server.rs @@ -6,7 +6,7 @@ use ethrex_common::{ H256, }; use ethrex_storage::Store; -use futures::SinkExt; +use futures::{stream::SplitSink, SinkExt as _, Stream}; use k256::{ecdsa::SigningKey, PublicKey}; use rand::random; use spawned_concurrency::tasks::{ @@ -77,7 +77,7 @@ pub struct Receiver { #[derive(Clone)] pub struct Established { pub(crate) signer: SigningKey, - pub(crate) framed: Arc>>, + pub(crate) sink: Arc, Message>>>, pub(crate) node: Node, pub(crate) storage: Store, pub(crate) blockchain: Arc, @@ -192,7 +192,7 @@ impl GenServer for RLPxConnection { handle: &GenServerHandle, mut state: Self::State, ) -> Result { - let mut established_state = handshake::perform(state.0).await?; + let (mut established_state, mut stream) = handshake::perform(state.0).await?; log_peer_debug(&established_state.node, "Starting RLPx connection"); if let Err(reason) = post_handshake_checks(established_state.table.clone()).await { connection_failed( @@ -204,7 +204,7 @@ impl GenServer for RLPxConnection { return Err(RLPxError::Disconnected()); } - if let Err(e) = exchange_hello_messages(&mut established_state).await { + if let Err(e) = exchange_hello_messages(&mut established_state, &mut stream).await { connection_failed(&mut established_state, "Hello messages exchange failed", e).await; return Err(RLPxError::Disconnected()); } else { @@ -228,7 +228,7 @@ impl GenServer for RLPxConnection { established_state.inbound, ); } - init_peer_conn(&mut established_state).await?; + init_peer_conn(&mut established_state, &mut stream).await?; log_peer_debug(&established_state.node, "Peer connection initialized."); // Send transactions transaction hashes from mempool at connection start @@ -244,11 +244,7 @@ impl GenServer for RLPxConnection { // TODO this should be replaced with spawned_concurrency::tasks::send_interval once it is properly implemented. 
send_after(PING_INTERVAL, handle.clone(), CastMessage::SendPing); - spawn_listener( - handle.clone(), - &established_state.node, - &established_state.framed, - ); + spawn_listener(handle.clone(), &established_state.node, stream); spawn_broadcast_listener(handle.clone(), &mut established_state); @@ -262,17 +258,17 @@ impl GenServer for RLPxConnection { &mut self, _message: Self::CallMsg, _handle: &RLPxConnectionHandle, - _state: &mut Self::State, - ) -> CallResponse { - CallResponse::Reply(Ok(OutMessage::Done)) + state: Self::State, + ) -> CallResponse { + CallResponse::Reply(state, Ok(OutMessage::Done)) } async fn handle_cast( &mut self, message: Self::CastMsg, handle: &RLPxConnectionHandle, - state: &mut Self::State, - ) -> CastResponse { + mut state: Self::State, + ) -> CastResponse { if let InnerState::Established(mut established_state) = state.0.clone() { match message { // TODO: handle all these "let _" @@ -298,7 +294,6 @@ impl GenServer for RLPxConnection { } Self::CastMsg::SendNewPooledTxHashes => { let _ = send_new_pooled_tx_hashes(&mut established_state).await; - log_peer_debug(&established_state.node, "SendNewPooledTxHashes sent"); // TODO this should be removed when spawned_concurrency::tasks::send_interval is implemented. send_after( TX_BROADCAST_INTERVAL, @@ -316,11 +311,11 @@ impl GenServer for RLPxConnection { } // Update the state state.0 = InnerState::Established(established_state); - CastResponse::NoReply + CastResponse::NoReply(state) } else { // Received a Cast message but connection is not ready. Log an error but keep the connection alive. error!("Connection not yet established"); - CastResponse::NoReply + CastResponse::NoReply(state) } } } @@ -366,7 +361,10 @@ async fn send_new_pooled_tx_hashes(state: &mut Established) -> Result<(), RLPxEr Ok(()) } -async fn init_peer_conn(state: &mut Established) -> Result<(), RLPxError> { +async fn init_peer_conn(state: &mut Established, stream: &mut S) -> Result<(), RLPxError> +where + S: Unpin + Stream>, +{ // Sending eth Status if peer supports it if let Some(eth) = state.negotiated_eth_capability.clone() { let status = backend::get_status(&state.storage, eth.version).await?; @@ -375,7 +373,7 @@ async fn init_peer_conn(state: &mut Established) -> Result<(), RLPxError> { // The next immediate message in the ETH protocol is the // status, reference here: // https://github.com/ethereum/devp2p/blob/master/caps/eth.md#status-0x00 - let msg = match receive(state).await { + let msg = match receive(stream).await { Some(msg) => msg?, None => return Err(RLPxError::Disconnected()), }; @@ -453,7 +451,7 @@ async fn connection_failed(state: &mut Established, error_text: &str, error: RLP } } - let _ = state.framed.lock().await.close().await; + let _ = state.sink.lock().await.close().await; } fn match_disconnect_reason(error: &RLPxError) -> Option { @@ -466,7 +464,13 @@ fn match_disconnect_reason(error: &RLPxError) -> Option { } } -async fn exchange_hello_messages(state: &mut Established) -> Result<(), RLPxError> { +async fn exchange_hello_messages( + state: &mut Established, + stream: &mut S, +) -> Result<(), RLPxError> +where + S: Unpin + Stream>, +{ let supported_capabilities: Vec = [ &SUPPORTED_ETH_CAPABILITIES[..], &SUPPORTED_SNAP_CAPABILITIES[..], @@ -482,7 +486,7 @@ async fn exchange_hello_messages(state: &mut Established) -> Result<(), RLPxErro send(state, hello_msg).await?; // Receive Hello message - let msg = match receive(state).await { + let msg = match receive(stream).await { Some(msg) => msg?, None => return 
Err(RLPxError::Disconnected()), }; @@ -547,7 +551,7 @@ async fn exchange_hello_messages(state: &mut Established) -> Result<(), RLPxErro } async fn send(state: &mut Established, message: Message) -> Result<(), RLPxError> { - state.framed.lock().await.send(message).await + state.sink.lock().await.send(message).await } /// Reads from the frame until a frame is available. @@ -561,33 +565,31 @@ async fn send(state: &mut Established, message: Message) -> Result<(), RLPxError /// while sending pings and you should not assume a disconnection. /// /// See [`Framed::new`] for more details. -async fn receive(state: &mut Established) -> Option> { - state.framed.lock().await.next().await +async fn receive(stream: &mut S) -> Option> +where + S: Unpin + Stream>, +{ + stream.next().await } -fn spawn_listener( - mut conn: RLPxConnectionHandle, - node: &Node, - framed: &Arc>>, -) { +fn spawn_listener(mut conn: RLPxConnectionHandle, node: &Node, mut stream: S) +where + S: Unpin + Send + Stream> + 'static, +{ let node = node.clone(); - let framed = framed.clone(); spawned_rt::tasks::spawn(async move { loop { - match framed.lock().await.next().await { + match stream.next().await { Some(message) => match message { Ok(message) => { - log_peer_debug(&node, &format!("Received message {}", message)); let _ = conn.cast(CastMessage::PeerMessage(message)).await; - return Ok(()); } Err(e) => { log_peer_debug(&node, &format!("Received RLPX Error in msg {}", e)); - return Err(e); } }, None => todo!(), - } + }; } }); } From d5b01317ffa0d62e8059e8f0830667b5b8b7a55a Mon Sep 17 00:00:00 2001 From: Esteban Dimitroff Hodi Date: Tue, 24 Jun 2025 12:39:14 -0300 Subject: [PATCH 08/40] Updated to newest spawned version --- Cargo.lock | 11 ++++---- Cargo.toml | 6 ++--- crates/l2/sequencer/l1_watcher.rs | 27 ++++++++++++------- crates/networking/p2p/discv4/server.rs | 4 +-- crates/networking/p2p/network.rs | 2 +- .../networking/p2p/rlpx/connection/server.rs | 24 +++++++---------- 6 files changed, 39 insertions(+), 35 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e371f1ae9b..42240b558d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9535,7 +9535,7 @@ dependencies = [ [[package]] name = "spawned-concurrency" version = "0.1.0" -source = "git+https://github.com/lambdaclass/spawned.git?rev=28d2db5ba7dc98e7ec03957691684113d287e05f#28d2db5ba7dc98e7ec03957691684113d287e05f" +source = "git+https://github.com/lambdaclass/spawned.git?rev=928c99f961b8d2dd27efd4eccd151499848f6339#928c99f961b8d2dd27efd4eccd151499848f6339" dependencies = [ "futures", "spawned-rt", @@ -9545,10 +9545,11 @@ dependencies = [ [[package]] name = "spawned-rt" version = "0.1.0" -source = "git+https://github.com/lambdaclass/spawned.git?rev=28d2db5ba7dc98e7ec03957691684113d287e05f#28d2db5ba7dc98e7ec03957691684113d287e05f" +source = "git+https://github.com/lambdaclass/spawned.git?rev=928c99f961b8d2dd27efd4eccd151499848f6339#928c99f961b8d2dd27efd4eccd151499848f6339" dependencies = [ "crossbeam 0.7.3", "tokio", + "tokio-util", "tracing", "tracing-subscriber 0.3.19", ] @@ -10112,15 +10113,15 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.14" +version = "0.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b9590b93e6fcc1739458317cccd391ad3955e2bde8913edf6f95f9e65a8f034" +checksum = "66a539a9ad6d5d281510d5bd368c973d636c02dbf8a67300bfb6b950696ad7df" dependencies = [ "bytes", "futures-core", "futures-sink", "futures-util", - "hashbrown 0.14.5", + "hashbrown 0.15.2", "pin-project-lite", "tokio", ] diff --git a/Cargo.toml 
b/Cargo.toml index 2a63fae909..f7d55041f8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -69,7 +69,7 @@ crc32fast = "1.4.2" lazy_static = "1.5.0" sha2 = "0.10.8" sha3 = "0.10.8" -tokio-util = { version = "0.7.12", features = ["rt"] } +tokio-util = { version = "0.7.15", features = ["rt"] } jsonwebtoken = "9.3.0" rand = "0.8.5" cfg-if = "1.0.0" @@ -88,5 +88,5 @@ libsecp256k1 = "0.7.2" clap = { version = "4.3", features = ["derive", "env"] } clap_complete = "4.5.17" eyre = "0.6.12" -spawned-concurrency = {git = "https://github.com/lambdaclass/spawned.git", rev = "28d2db5ba7dc98e7ec03957691684113d287e05f"} -spawned-rt = {git = "https://github.com/lambdaclass/spawned.git", rev = "28d2db5ba7dc98e7ec03957691684113d287e05f"} +spawned-concurrency = {git = "https://github.com/lambdaclass/spawned.git", rev = "928c99f961b8d2dd27efd4eccd151499848f6339"} +spawned-rt = {git = "https://github.com/lambdaclass/spawned.git", rev = "928c99f961b8d2dd27efd4eccd151499848f6339"} diff --git a/crates/l2/sequencer/l1_watcher.rs b/crates/l2/sequencer/l1_watcher.rs index 83205f4785..d5fe7158a7 100644 --- a/crates/l2/sequencer/l1_watcher.rs +++ b/crates/l2/sequencer/l1_watcher.rs @@ -17,9 +17,8 @@ use tracing::{debug, error, info, warn}; use super::utils::random_duration; use spawned_concurrency::tasks::{ - send_after, CallResponse, CastResponse, GenServer, GenServerInMsg, + send_after, CallResponse, CastResponse, GenServer, GenServerHandle, }; -use spawned_rt::tasks::mpsc::Sender; #[derive(Clone)] pub struct L1WatcherState { @@ -76,9 +75,7 @@ impl L1Watcher { pub async fn spawn(store: Store, blockchain: Arc, cfg: SequencerConfig) { match L1WatcherState::new(store.clone(), blockchain.clone(), &cfg.eth, &cfg.l1_watcher) { Ok(state) => { - let mut l1_watcher = L1Watcher::start(state); - // Perform the check and suscribe a periodic Watch. - let _ = l1_watcher.cast(InMessage::Watch).await; + let _ = L1Watcher::start(state); } Err(error) => error!("L1 Watcher Error: {}", error), }; @@ -96,25 +93,35 @@ impl GenServer for L1Watcher { Self {} } + async fn init( + &mut self, + handle: &GenServerHandle, + state: Self::State, + ) -> Result { + // Perform the check and suscribe a periodic Watch. + let _ = handle.clone().cast(InMessage::Watch).await; + Ok(state) + } + async fn handle_call( &mut self, _message: Self::CallMsg, _handle: &GenServerHandle, state: Self::State, - ) -> CallResponse { - CallResponse::Reply(state, OutMessage::Done) + ) -> CallResponse { + CallResponse::Reply(state, Self::OutMsg::Done) } async fn handle_cast( &mut self, message: Self::CastMsg, handle: &GenServerHandle, - state: Self::State, + mut state: Self::State, ) -> CastResponse { match message { - Self::InMsg::Watch => { + Self::CastMsg::Watch => { let check_interval = random_duration(state.check_interval); - send_after(check_interval, handle.clone(), Self::InMsg::Watch); + send_after(check_interval, handle.clone(), Self::CastMsg::Watch); match get_logs(&mut state).await { Ok(logs) => { // We may not have a deposit nor a withdrawal, that means no events -> no logs. 
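
Note on the recurring change in these hunks: with the updated spawned_concurrency API, GenServer callbacks receive the state by value and hand it back inside the response (CallResponse::Reply(state, ...) / CastResponse::NoReply(state)), and recurring work is armed once from init via send_interval instead of being re-armed with send_after on every cast. The following is only an illustrative sketch of that shape, not part of any patch: the Ticker names are invented, and the generic parameters (GenServerHandle<Self>, CallResponse<Self>, CastResponse<Self>, Result<Self::State, Self::Error>) are approximations, since the angle-bracketed type arguments were lost from the diff text in this copy.

use std::time::Duration;

use spawned_concurrency::tasks::{
    send_interval, CallResponse, CastResponse, GenServer, GenServerError, GenServerHandle,
};

// Hypothetical example server, not part of the patch series.
#[derive(Clone)]
struct Ticker;

#[derive(Clone)]
struct TickerState {
    ticks: u64,
}

#[derive(Clone)]
enum TickerCast {
    Tick,
}

impl GenServer for Ticker {
    type CallMsg = ();
    type CastMsg = TickerCast;
    type OutMsg = ();
    type State = TickerState;
    type Error = GenServerError;

    fn new() -> Self {
        Self
    }

    // Periodic work is scheduled once here, instead of re-sending
    // `send_after` from every `handle_cast` as the earlier patches did.
    async fn init(
        &mut self,
        handle: &GenServerHandle<Self>,
        state: Self::State,
    ) -> Result<Self::State, Self::Error> {
        send_interval(Duration::from_secs(1), handle.clone(), TickerCast::Tick);
        Ok(state)
    }

    async fn handle_call(
        &mut self,
        _message: Self::CallMsg,
        _handle: &GenServerHandle<Self>,
        state: Self::State,
    ) -> CallResponse<Self> {
        // State arrives by value and is returned together with the reply.
        CallResponse::Reply(state, ())
    }

    async fn handle_cast(
        &mut self,
        _message: Self::CastMsg,
        _handle: &GenServerHandle<Self>,
        mut state: Self::State,
    ) -> CastResponse<Self> {
        state.ticks += 1;
        CastResponse::NoReply(state)
    }
}
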
diff --git a/crates/networking/p2p/discv4/server.rs b/crates/networking/p2p/discv4/server.rs index 24797859f8..97a1ad8240 100644 --- a/crates/networking/p2p/discv4/server.rs +++ b/crates/networking/p2p/discv4/server.rs @@ -225,7 +225,7 @@ impl Discv4Server { return Ok(()); } - RLPxConnection::spawn_as_initiator(self.ctx.clone(), &peer.node).await; + let _ = RLPxConnection::spawn_as_initiator(self.ctx.clone(), &peer.node).await; Ok(()) } @@ -518,7 +518,7 @@ impl Discv4Server { return Ok(()); } - RLPxConnection::spawn_as_initiator(self.ctx.clone(), &peer.node).await; + let _ = RLPxConnection::spawn_as_initiator(self.ctx.clone(), &peer.node).await; Ok(()) } diff --git a/crates/networking/p2p/network.rs b/crates/networking/p2p/network.rs index b8edfa0be4..df7ce13946 100644 --- a/crates/networking/p2p/network.rs +++ b/crates/networking/p2p/network.rs @@ -129,7 +129,7 @@ pub(crate) async fn serve_p2p_requests(context: P2PContext) { } }; - RLPxConnection::spawn_as_receiver(context.clone(), peer_addr, stream).await; + let _ = RLPxConnection::spawn_as_receiver(context.clone(), peer_addr, stream).await; } } diff --git a/crates/networking/p2p/rlpx/connection/server.rs b/crates/networking/p2p/rlpx/connection/server.rs index cc0c1def0f..2d46db942e 100644 --- a/crates/networking/p2p/rlpx/connection/server.rs +++ b/crates/networking/p2p/rlpx/connection/server.rs @@ -10,7 +10,7 @@ use futures::{stream::SplitSink, SinkExt as _, Stream}; use k256::{ecdsa::SigningKey, PublicKey}; use rand::random; use spawned_concurrency::tasks::{ - send_after, CallResponse, CastResponse, GenServer, GenServerHandle, + send_interval, CallResponse, CastResponse, GenServer, GenServerHandle, }; use tokio::{ net::{TcpSocket, TcpStream}, @@ -125,8 +125,11 @@ impl RLPxConnectionState { } } +#[derive(Clone)] pub enum CallMessage {} +#[derive(Clone)] +#[allow(private_interfaces)] pub enum CastMessage { PeerMessage(Message), BackendMessage(Message), @@ -136,6 +139,7 @@ pub enum CastMessage { } #[derive(Clone)] +#[allow(private_interfaces)] pub enum OutMessage { InitResponse { node: Node, @@ -234,15 +238,15 @@ impl GenServer for RLPxConnection { // Send transactions transaction hashes from mempool at connection start send_new_pooled_tx_hashes(&mut established_state).await?; - // TODO this should be replaced with spawned_concurrency::tasks::send_interval once it is properly implemented. - send_after( + // Periodic broadcast check repeated events. + send_interval( TX_BROADCAST_INTERVAL, handle.clone(), CastMessage::SendNewPooledTxHashes, ); - // TODO this should be replaced with spawned_concurrency::tasks::send_interval once it is properly implemented. - send_after(PING_INTERVAL, handle.clone(), CastMessage::SendPing); + // Periodic Pings repeated events. + send_interval(PING_INTERVAL, handle.clone(), CastMessage::SendPing); spawn_listener(handle.clone(), &established_state.node, stream); @@ -266,7 +270,7 @@ impl GenServer for RLPxConnection { async fn handle_cast( &mut self, message: Self::CastMsg, - handle: &RLPxConnectionHandle, + _handle: &RLPxConnectionHandle, mut state: Self::State, ) -> CastResponse { if let InnerState::Established(mut established_state) = state.0.clone() { @@ -289,17 +293,9 @@ impl GenServer for RLPxConnection { Self::CastMsg::SendPing => { let _ = send(&mut established_state, Message::Ping(PingMessage {})).await; log_peer_debug(&established_state.node, "Ping sent"); - // TODO this should be removed when spawned_concurrency::tasks::send_interval is implemented. 
- send_after(PING_INTERVAL, handle.clone(), CastMessage::SendPing); } Self::CastMsg::SendNewPooledTxHashes => { let _ = send_new_pooled_tx_hashes(&mut established_state).await; - // TODO this should be removed when spawned_concurrency::tasks::send_interval is implemented. - send_after( - TX_BROADCAST_INTERVAL, - handle.clone(), - CastMessage::SendNewPooledTxHashes, - ); } Self::CastMsg::BroadcastMessage(id, msg) => { log_peer_debug( From b5a33891947f8d5f8ef1f52b98fd410127774a1b Mon Sep 17 00:00:00 2001 From: Esteban Dimitroff Hodi Date: Tue, 24 Jun 2025 16:27:19 -0300 Subject: [PATCH 09/40] Updated to newest spawned version --- Cargo.lock | 4 ++-- Cargo.toml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 42240b558d..5970cbf2da 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9535,7 +9535,7 @@ dependencies = [ [[package]] name = "spawned-concurrency" version = "0.1.0" -source = "git+https://github.com/lambdaclass/spawned.git?rev=928c99f961b8d2dd27efd4eccd151499848f6339#928c99f961b8d2dd27efd4eccd151499848f6339" +source = "git+https://github.com/lambdaclass/spawned.git?tag=v0.1.1-alpha#481439d3d147fa741d67bb5ff047f8c68bcefb83" dependencies = [ "futures", "spawned-rt", @@ -9545,7 +9545,7 @@ dependencies = [ [[package]] name = "spawned-rt" version = "0.1.0" -source = "git+https://github.com/lambdaclass/spawned.git?rev=928c99f961b8d2dd27efd4eccd151499848f6339#928c99f961b8d2dd27efd4eccd151499848f6339" +source = "git+https://github.com/lambdaclass/spawned.git?tag=v0.1.1-alpha#481439d3d147fa741d67bb5ff047f8c68bcefb83" dependencies = [ "crossbeam 0.7.3", "tokio", diff --git a/Cargo.toml b/Cargo.toml index f7d55041f8..11721b1206 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -88,5 +88,5 @@ libsecp256k1 = "0.7.2" clap = { version = "4.3", features = ["derive", "env"] } clap_complete = "4.5.17" eyre = "0.6.12" -spawned-concurrency = {git = "https://github.com/lambdaclass/spawned.git", rev = "928c99f961b8d2dd27efd4eccd151499848f6339"} -spawned-rt = {git = "https://github.com/lambdaclass/spawned.git", rev = "928c99f961b8d2dd27efd4eccd151499848f6339"} +spawned-concurrency = {git = "https://github.com/lambdaclass/spawned.git", tag = "v0.1.1-alpha"} +spawned-rt = {git = "https://github.com/lambdaclass/spawned.git", tag = "v0.1.1-alpha"} From 377e47bc69ddb8b277e8ea41381089d3205e72b6 Mon Sep 17 00:00:00 2001 From: Esteban Dimitroff Hodi Date: Wed, 25 Jun 2025 14:06:34 -0300 Subject: [PATCH 10/40] Fixed merge problems --- crates/l2/based/block_fetcher.rs | 35 ++++----- crates/l2/based/state_updater.rs | 33 +++++---- crates/l2/sequencer/block_producer.rs | 34 ++++----- crates/l2/sequencer/errors.rs | 2 +- crates/l2/sequencer/l1_committer.rs | 32 +++++---- crates/l2/sequencer/l1_proof_sender.rs | 32 +++++---- crates/l2/sequencer/l1_watcher.rs | 14 ++-- crates/l2/sequencer/proof_coordinator.rs | 71 +++++++++++-------- crates/networking/p2p/kademlia.rs | 2 +- .../p2p/rlpx/connection/handshake.rs | 9 ++- .../networking/p2p/rlpx/connection/server.rs | 26 +++---- 11 files changed, 161 insertions(+), 129 deletions(-) diff --git a/crates/l2/based/block_fetcher.rs b/crates/l2/based/block_fetcher.rs index 0f074357e1..6f066eb4f8 100644 --- a/crates/l2/based/block_fetcher.rs +++ b/crates/l2/based/block_fetcher.rs @@ -18,7 +18,9 @@ use ethrex_storage::Store; use ethrex_storage_rollup::StoreRollup; use ethrex_vm::{Evm, EvmEngine}; use keccak_hash::keccak; -use spawned_concurrency::{CallResponse, CastResponse, GenServer, send_after}; +use spawned_concurrency::tasks::{ + 
CallResponse, CastResponse, GenServer, GenServerError, GenServerHandle, send_after, +}; use tracing::{debug, error, info}; use crate::{ @@ -54,7 +56,7 @@ pub enum BlockFetcherError { #[error("Failed to compute deposit logs hash: {0}")] DepositError(#[from] ethrex_l2_common::deposits::DepositError), #[error("Spawned GenServer Error")] - GenServerError(spawned_concurrency::GenServerError), + GenServerError(GenServerError), } #[derive(Clone)] @@ -128,7 +130,8 @@ impl BlockFetcher { } impl GenServer for BlockFetcher { - type InMsg = InMessage; + type CallMsg = (); + type CastMsg = InMessage; type OutMsg = OutMessage; type State = BlockFetcherState; type Error = BlockFetcherError; @@ -139,30 +142,30 @@ impl GenServer for BlockFetcher { async fn handle_call( &mut self, - _message: Self::InMsg, - _tx: &spawned_rt::mpsc::Sender>, - _state: &mut Self::State, - ) -> spawned_concurrency::CallResponse { - CallResponse::Reply(OutMessage::Done) + _message: Self::CallMsg, + _handle: &GenServerHandle, + state: Self::State, + ) -> CallResponse { + CallResponse::Reply(state, Self::OutMsg::Done) } async fn handle_cast( &mut self, - _message: Self::InMsg, - _tx: &spawned_rt::mpsc::Sender>, - state: &mut Self::State, - ) -> spawned_concurrency::CastResponse { + _message: Self::CastMsg, + handle: &GenServerHandle, + mut state: Self::State, + ) -> CastResponse { if let SequencerStatus::Following = state.sequencer_state.status().await { - let _ = fetch(state).await.inspect_err(|err| { + let _ = fetch(&mut state).await.inspect_err(|err| { error!("Block Fetcher Error: {err}"); }); } send_after( Duration::from_millis(state.fetch_interval_ms), - _tx.clone(), - Self::InMsg::Fetch, + handle.clone(), + Self::CastMsg::Fetch, ); - CastResponse::NoReply + CastResponse::NoReply(state) } } diff --git a/crates/l2/based/state_updater.rs b/crates/l2/based/state_updater.rs index 26876f823f..cee4a1397b 100644 --- a/crates/l2/based/state_updater.rs +++ b/crates/l2/based/state_updater.rs @@ -6,7 +6,9 @@ use ethrex_l2_sdk::calldata::encode_calldata; use ethrex_rpc::{EthClient, clients::Overrides}; use ethrex_storage::Store; use ethrex_storage_rollup::StoreRollup; -use spawned_concurrency::{CallResponse, CastResponse, GenServer, GenServerError, send_after}; +use spawned_concurrency::tasks::{ + CallResponse, CastResponse, GenServer, GenServerError, GenServerHandle, send_after, +}; use tracing::{debug, error, info, warn}; use crate::{ @@ -97,7 +99,8 @@ impl StateUpdater { } impl GenServer for StateUpdater { - type InMsg = InMessage; + type CallMsg = (); + type CastMsg = InMessage; type OutMsg = OutMessage; type State = StateUpdaterState; type Error = StateUpdaterError; @@ -108,28 +111,28 @@ impl GenServer for StateUpdater { async fn handle_call( &mut self, - _message: Self::InMsg, - _tx: &spawned_rt::mpsc::Sender>, - _state: &mut Self::State, - ) -> spawned_concurrency::CallResponse { - CallResponse::Reply(OutMessage::Done) + _message: Self::CallMsg, + _handle: &GenServerHandle, + state: Self::State, + ) -> CallResponse { + CallResponse::Reply(state, Self::OutMsg::Done) } async fn handle_cast( &mut self, - _message: Self::InMsg, - tx: &spawned_rt::mpsc::Sender>, - state: &mut Self::State, - ) -> spawned_concurrency::CastResponse { - let _ = update_state(state) + _message: Self::CastMsg, + handle: &GenServerHandle, + mut state: Self::State, + ) -> CastResponse { + let _ = update_state(&mut state) .await .inspect_err(|err| error!("State Updater Error: {err}")); send_after( Duration::from_millis(state.check_interval_ms), - tx.clone(), - 
Self::InMsg::UpdateState, + handle.clone(), + Self::CastMsg::UpdateState, ); - CastResponse::NoReply + CastResponse::NoReply(state) } } diff --git a/crates/l2/sequencer/block_producer.rs b/crates/l2/sequencer/block_producer.rs index b75a4cc60d..c5130a3a2e 100644 --- a/crates/l2/sequencer/block_producer.rs +++ b/crates/l2/sequencer/block_producer.rs @@ -16,8 +16,9 @@ use ethrex_storage_rollup::StoreRollup; use ethrex_vm::BlockExecutionResult; use keccak_hash::H256; use payload_builder::build_payload; -use spawned_concurrency::{CallResponse, CastResponse, GenServer, GenServerInMsg, send_after}; -use spawned_rt::mpsc::Sender; +use spawned_concurrency::tasks::{ + CallResponse, CastResponse, GenServer, GenServerHandle, send_after, +}; use tracing::{debug, error, info}; use crate::{ @@ -104,7 +105,8 @@ impl BlockProducer { } impl GenServer for BlockProducer { - type InMsg = InMessage; + type CallMsg = (); + type CastMsg = InMessage; type OutMsg = OutMessage; type State = BlockProducerState; @@ -116,31 +118,31 @@ impl GenServer for BlockProducer { async fn handle_call( &mut self, - _message: Self::InMsg, - _tx: &Sender>, - _state: &mut Self::State, - ) -> CallResponse { - CallResponse::Reply(OutMessage::Done) + _message: Self::CallMsg, + _handle: &GenServerHandle, + state: Self::State, + ) -> CallResponse { + CallResponse::Reply(state, Self::OutMsg::Done) } async fn handle_cast( &mut self, - _message: Self::InMsg, - tx: &Sender>, - state: &mut Self::State, - ) -> CastResponse { + _message: Self::CastMsg, + handle: &GenServerHandle, + mut state: Self::State, + ) -> CastResponse { // Right now we only have the Produce message, so we ignore the message if let SequencerStatus::Sequencing = state.sequencer_state.status().await { - let _ = produce_block(state) + let _ = produce_block(&mut state) .await .inspect_err(|e| error!("Block Producer Error: {e}")); } send_after( Duration::from_millis(state.block_time_ms), - tx.clone(), - Self::InMsg::Produce, + handle.clone(), + Self::CastMsg::Produce, ); - CastResponse::NoReply + CastResponse::NoReply(state) } } diff --git a/crates/l2/sequencer/errors.rs b/crates/l2/sequencer/errors.rs index a5da58c48b..3e54212950 100644 --- a/crates/l2/sequencer/errors.rs +++ b/crates/l2/sequencer/errors.rs @@ -14,7 +14,7 @@ use ethrex_rpc::clients::EngineClientError; use ethrex_rpc::clients::eth::errors::{CalldataEncodeError, EthClientError}; use ethrex_storage::error::StoreError; use ethrex_vm::{EvmError, ProverDBError}; -use spawned_concurrency::GenServerError; +use spawned_concurrency::tasks::GenServerError; use tokio::task::JoinError; #[derive(Debug, thiserror::Error)] diff --git a/crates/l2/sequencer/l1_committer.rs b/crates/l2/sequencer/l1_committer.rs index 172769ecb5..52ee45fc03 100644 --- a/crates/l2/sequencer/l1_committer.rs +++ b/crates/l2/sequencer/l1_committer.rs @@ -34,8 +34,9 @@ use std::collections::HashMap; use tracing::{debug, error, info, warn}; use super::{errors::BlobEstimationError, utils::random_duration}; -use spawned_concurrency::{CallResponse, CastResponse, GenServer, GenServerInMsg, send_after}; -use spawned_rt::mpsc::Sender; +use spawned_concurrency::tasks::{ + CallResponse, CastResponse, GenServer, GenServerHandle, send_after, +}; const COMMIT_FUNCTION_SIGNATURE_BASED: &str = "commitBatch(uint256,bytes32,bytes32,bytes32,bytes32,bytes[])"; @@ -128,7 +129,8 @@ impl L1Committer { } impl GenServer for L1Committer { - type InMsg = InMessage; + type CallMsg = (); + type CastMsg = InMessage; type OutMsg = OutMessage; type State = CommitterState; @@ 
-140,28 +142,28 @@ impl GenServer for L1Committer { async fn handle_call( &mut self, - _message: Self::InMsg, - _tx: &Sender>, - _state: &mut Self::State, - ) -> CallResponse { - CallResponse::Reply(OutMessage::Done) + _message: Self::CallMsg, + _handle: &GenServerHandle, + state: Self::State, + ) -> CallResponse { + CallResponse::Reply(state, Self::OutMsg::Done) } async fn handle_cast( &mut self, - _message: Self::InMsg, - tx: &Sender>, - state: &mut Self::State, - ) -> CastResponse { + _message: Self::CastMsg, + handle: &GenServerHandle, + mut state: Self::State, + ) -> CastResponse { // Right now we only have the Commit message, so we ignore the message if let SequencerStatus::Sequencing = state.sequencer_state.status().await { - let _ = commit_next_batch_to_l1(state) + let _ = commit_next_batch_to_l1(&mut state) .await .inspect_err(|err| error!("L1 Committer Error: {err}")); } let check_interval = random_duration(state.commit_time_ms); - send_after(check_interval, tx.clone(), Self::InMsg::Commit); - CastResponse::NoReply + send_after(check_interval, handle.clone(), Self::CastMsg::Commit); + CastResponse::NoReply(state) } } diff --git a/crates/l2/sequencer/l1_proof_sender.rs b/crates/l2/sequencer/l1_proof_sender.rs index 3d1fa6968f..9fd07f7933 100644 --- a/crates/l2/sequencer/l1_proof_sender.rs +++ b/crates/l2/sequencer/l1_proof_sender.rs @@ -5,8 +5,9 @@ use ethrex_l2_sdk::calldata::{Value, encode_calldata}; use ethrex_rpc::EthClient; use ethrex_storage_rollup::StoreRollup; use secp256k1::SecretKey; -use spawned_concurrency::{CallResponse, CastResponse, GenServer, GenServerInMsg, send_after}; -use spawned_rt::mpsc::Sender; +use spawned_concurrency::tasks::{ + CallResponse, CastResponse, GenServer, GenServerHandle, send_after, +}; use tracing::{debug, error, info}; use super::{ @@ -139,7 +140,8 @@ impl L1ProofSender { } impl GenServer for L1ProofSender { - type InMsg = InMessage; + type CallMsg = (); + type CastMsg = InMessage; type OutMsg = OutMessage; type State = L1ProofSenderState; @@ -151,28 +153,28 @@ impl GenServer for L1ProofSender { async fn handle_call( &mut self, - _message: Self::InMsg, - _tx: &Sender>, - _state: &mut Self::State, - ) -> CallResponse { - CallResponse::Reply(OutMessage::Done) + _message: Self::CallMsg, + _handle: &GenServerHandle, + state: Self::State, + ) -> CallResponse { + CallResponse::Reply(state, OutMessage::Done) } async fn handle_cast( &mut self, - _message: Self::InMsg, - tx: &Sender>, - state: &mut Self::State, - ) -> CastResponse { + _message: Self::CastMsg, + handle: &GenServerHandle, + state: Self::State, + ) -> CastResponse { // Right now we only have the Send message, so we ignore the message if let SequencerStatus::Sequencing = state.sequencer_state.status().await { - let _ = verify_and_send_proof(state) + let _ = verify_and_send_proof(&state) .await .inspect_err(|err| error!("L1 Proof Sender: {err}")); } let check_interval = random_duration(state.proof_send_interval_ms); - send_after(check_interval, tx.clone(), Self::InMsg::Send); - CastResponse::NoReply + send_after(check_interval, handle.clone(), Self::CastMsg::Send); + CastResponse::NoReply(state) } } diff --git a/crates/l2/sequencer/l1_watcher.rs b/crates/l2/sequencer/l1_watcher.rs index 1124c1289b..2189ab765c 100644 --- a/crates/l2/sequencer/l1_watcher.rs +++ b/crates/l2/sequencer/l1_watcher.rs @@ -13,12 +13,11 @@ use ethrex_rpc::{ }; use ethrex_storage::Store; use keccak_hash::keccak; -use std::{cmp::min, sync::Arc}; -use tracing::{debug, error, info, warn}; -use 
super::utils::random_duration; use spawned_concurrency::tasks::{ - send_after, CallResponse, CastResponse, GenServer, GenServerHandle, + CallResponse, CastResponse, GenServer, GenServerHandle, send_after, }; +use std::{cmp::min, sync::Arc}; +use tracing::{debug, error, info, warn}; #[derive(Clone)] pub struct L1WatcherState { @@ -89,6 +88,7 @@ impl L1Watcher { sequencer_state, )?; L1Watcher::start(state); + Ok(()) } } @@ -109,7 +109,11 @@ impl GenServer for L1Watcher { state: Self::State, ) -> Result { // Perform the check and suscribe a periodic Watch. - handle.clone().cast(Self::CastMsg::Watch).await.map_err(Self::Error::GenServerError)?; + handle + .clone() + .cast(Self::CastMsg::Watch) + .await + .map_err(Self::Error::GenServerError)?; Ok(state) } diff --git a/crates/l2/sequencer/proof_coordinator.rs b/crates/l2/sequencer/proof_coordinator.rs index 82d2a81ade..f16e4e2b03 100644 --- a/crates/l2/sequencer/proof_coordinator.rs +++ b/crates/l2/sequencer/proof_coordinator.rs @@ -22,7 +22,7 @@ use ethrex_storage_rollup::StoreRollup; use secp256k1::SecretKey; use serde::{Deserialize, Serialize}; use serde_with::serde_as; -use spawned_concurrency::{CallResponse, CastResponse, GenServer}; +use spawned_concurrency::tasks::{CallResponse, CastResponse, GenServer, GenServerHandle}; use std::net::{IpAddr, SocketAddr}; use std::sync::Arc; use tokio::{ @@ -199,8 +199,9 @@ impl ProofCoordinatorState { } } +#[derive(Clone)] pub enum ProofCordInMessage { - Listen { listener: TcpListener }, + Listen { listener: Arc }, } #[derive(Clone, PartialEq)] @@ -229,7 +230,8 @@ impl ProofCoordinator { needed_proof_types, ) .await?; - let listener = TcpListener::bind(format!("{}:{}", state.listen_ip, state.port)).await?; + let listener = + Arc::new(TcpListener::bind(format!("{}:{}", state.listen_ip, state.port)).await?); let mut proof_coordinator = ProofCoordinator::start(state); let _ = proof_coordinator .cast(ProofCordInMessage::Listen { listener }) @@ -239,7 +241,8 @@ impl ProofCoordinator { } impl GenServer for ProofCoordinator { - type InMsg = ProofCordInMessage; + type CallMsg = (); + type CastMsg = ProofCordInMessage; type OutMsg = ProofCordOutMessage; type State = ProofCoordinatorState; type Error = ProofCoordinatorError; @@ -250,30 +253,30 @@ impl GenServer for ProofCoordinator { async fn handle_call( &mut self, - _message: Self::InMsg, - _tx: &spawned_rt::mpsc::Sender>, - _state: &mut Self::State, - ) -> CallResponse { - CallResponse::Reply(ProofCordOutMessage::Done) + _message: Self::CallMsg, + _handle: &GenServerHandle, + state: Self::State, + ) -> CallResponse { + CallResponse::Reply(state, ProofCordOutMessage::Done) } async fn handle_cast( &mut self, - message: Self::InMsg, - _tx: &spawned_rt::mpsc::Sender>, - state: &mut Self::State, - ) -> CastResponse { + message: Self::CastMsg, + _handle: &GenServerHandle, + state: Self::State, + ) -> CastResponse { info!("Receiving message"); match message { ProofCordInMessage::Listen { listener } => { - handle_listens(state, listener).await; + handle_listens(&state, listener).await; } } CastResponse::Stop } } -async fn handle_listens(state: &ProofCoordinatorState, listener: TcpListener) { +async fn handle_listens(state: &ProofCoordinatorState, listener: Arc) { info!("Starting TCP server at {}:{}.", state.listen_ip, state.port); loop { let res = listener.accept().await; @@ -309,14 +312,21 @@ impl ConnectionHandler { ) -> Result<(), ConnectionHandlerError> { let mut connection_handler = ConnectionHandler::start(state); connection_handler - 
.cast(ConnInMessage::Connection { stream, addr }) + .cast(ConnInMessage::Connection { + stream: Arc::new(stream), + addr, + }) .await .map_err(ConnectionHandlerError::GenServerError) } } +#[derive(Clone)] pub enum ConnInMessage { - Connection { stream: TcpStream, addr: SocketAddr }, + Connection { + stream: Arc, + addr: SocketAddr, + }, } #[derive(Clone, PartialEq)] @@ -325,7 +335,8 @@ pub enum ConnOutMessage { } impl GenServer for ConnectionHandler { - type InMsg = ConnInMessage; + type CallMsg = ConnInMessage; + type CastMsg = ConnInMessage; type OutMsg = ConnOutMessage; type State = ProofCoordinatorState; type Error = ProofCoordinatorError; @@ -336,23 +347,23 @@ impl GenServer for ConnectionHandler { async fn handle_call( &mut self, - _message: Self::InMsg, - _tx: &spawned_rt::mpsc::Sender>, - _state: &mut Self::State, - ) -> CallResponse { - CallResponse::Reply(ConnOutMessage::Done) + _message: Self::CallMsg, + _handle: &GenServerHandle, + state: Self::State, + ) -> CallResponse { + CallResponse::Reply(state, ConnOutMessage::Done) } async fn handle_cast( &mut self, - message: Self::InMsg, - _tx: &spawned_rt::mpsc::Sender>, - state: &mut Self::State, - ) -> CastResponse { + message: Self::CastMsg, + _handle: &GenServerHandle, + state: Self::State, + ) -> CastResponse { info!("Receiving message"); match message { ConnInMessage::Connection { stream, addr } => { - if let Err(err) = handle_connection(state, stream).await { + if let Err(err) = handle_connection(&state, stream).await { error!("Error handling connection from {addr}: {err}"); } else { debug!("Connection from {addr} handled successfully"); @@ -365,9 +376,11 @@ impl GenServer for ConnectionHandler { async fn handle_connection( state: &ProofCoordinatorState, - mut stream: TcpStream, + stream: Arc, ) -> Result<(), ProofCoordinatorError> { let mut buffer = Vec::new(); + // TODO: This should be fixed in https://github.com/lambdaclass/ethrex/issues/3316 + let mut stream = Arc::try_unwrap(stream).unwrap(); stream.read_to_end(&mut buffer).await?; let data: Result = serde_json::from_slice(&buffer); diff --git a/crates/networking/p2p/kademlia.rs b/crates/networking/p2p/kademlia.rs index e99fcb777d..089078c1c5 100644 --- a/crates/networking/p2p/kademlia.rs +++ b/crates/networking/p2p/kademlia.rs @@ -4,8 +4,8 @@ use crate::{ types::{Node, NodeRecord}, }; use ethrex_common::{H256, U256}; -use spawned_concurrency::tasks::GenServerHandle; use rand::random; +use spawned_concurrency::tasks::GenServerHandle; use std::sync::Arc; use tokio::sync::mpsc::UnboundedSender; use tokio::sync::{Mutex, mpsc}; diff --git a/crates/networking/p2p/rlpx/connection/handshake.rs b/crates/networking/p2p/rlpx/connection/handshake.rs index e19f26ec86..e910982638 100644 --- a/crates/networking/p2p/rlpx/connection/handshake.rs +++ b/crates/networking/p2p/rlpx/connection/handshake.rs @@ -1,4 +1,7 @@ -use std::{collections::{HashMap, HashSet}, sync::Arc}; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; use crate::{ rlpx::{ @@ -19,7 +22,7 @@ use ethrex_rlp::{ error::RLPDecodeError, structs::{Decoder, Encoder}, }; -use futures::{stream::SplitStream, StreamExt}; +use futures::{StreamExt, stream::SplitStream}; use k256::{ PublicKey, SecretKey, ecdsa::{self, RecoveryId, SigningKey, VerifyingKey}, @@ -109,7 +112,7 @@ pub(crate) async fn perform( (context, node, Framed::new(stream, codec), true) } InnerState::Established(_) => { - return Err(RLPxError::StateError("Already established".to_string())) + return Err(RLPxError::StateError("Already 
established".to_string())); } }; let (sink, stream) = framed.split(); diff --git a/crates/networking/p2p/rlpx/connection/server.rs b/crates/networking/p2p/rlpx/connection/server.rs index 0098f88fe9..23f05b5623 100644 --- a/crates/networking/p2p/rlpx/connection/server.rs +++ b/crates/networking/p2p/rlpx/connection/server.rs @@ -1,20 +1,25 @@ -use std::{collections::{HashMap, HashSet}, net::SocketAddr, sync::Arc, time::Duration}; +use std::{ + collections::{HashMap, HashSet}, + net::SocketAddr, + sync::Arc, + time::Duration, +}; use ethrex_blockchain::Blockchain; use ethrex_common::{ - types::{MempoolTransaction, Transaction}, H256, + types::{MempoolTransaction, Transaction}, }; use ethrex_storage::Store; -use futures::{stream::SplitSink, SinkExt as _, Stream}; -use k256::{ecdsa::SigningKey, PublicKey}; +use futures::{SinkExt as _, Stream, stream::SplitSink}; +use k256::{PublicKey, ecdsa::SigningKey}; use rand::random; use spawned_concurrency::tasks::{ - send_interval, CallResponse, CastResponse, GenServer, GenServerHandle, + CallResponse, CastResponse, GenServer, GenServerHandle, send_interval, }; use tokio::{ net::{TcpSocket, TcpStream}, - sync::{broadcast, mpsc::Sender, Mutex}, + sync::{Mutex, broadcast, mpsc::Sender}, task, }; use tokio_stream::StreamExt; @@ -317,10 +322,7 @@ impl GenServer for RLPxConnection { let _ = handle_broadcast(&mut established_state, (id, msg)).await; } Self::CastMsg::BlockRangeUpdate => { - log_peer_debug( - &established_state.node, - &format!("Block Range Update"), - ); + log_peer_debug(&established_state.node, &format!("Block Range Update")); let _ = handle_block_range_update(&mut established_state).await; } } @@ -829,9 +831,7 @@ async fn handle_broadcast( Ok(()) } -async fn handle_block_range_update( - state: &mut Established, -) -> Result<(), RLPxError> { +async fn handle_block_range_update(state: &mut Established) -> Result<(), RLPxError> { if should_send_block_range_update(state).await? 
{ send_block_range_update(state).await } else { From e89778123128de6a243646f4e31a2dda41b1c5d6 Mon Sep 17 00:00:00 2001 From: Esteban Dimitroff Hodi Date: Wed, 25 Jun 2025 14:30:19 -0300 Subject: [PATCH 11/40] Fixed linter issues --- crates/networking/p2p/rlpx/connection/server.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/networking/p2p/rlpx/connection/server.rs b/crates/networking/p2p/rlpx/connection/server.rs index 23f05b5623..c377533317 100644 --- a/crates/networking/p2p/rlpx/connection/server.rs +++ b/crates/networking/p2p/rlpx/connection/server.rs @@ -220,7 +220,7 @@ impl GenServer for RLPxConnection { if let Err(e) = exchange_hello_messages(&mut established_state, &mut stream).await { connection_failed(&mut established_state, "Hello messages exchange failed", e).await; - return Err(RLPxError::Disconnected()); + Err(RLPxError::Disconnected()) } else { // Handshake OK: handle connection // Create channels to communicate directly to the peer @@ -322,7 +322,7 @@ impl GenServer for RLPxConnection { let _ = handle_broadcast(&mut established_state, (id, msg)).await; } Self::CastMsg::BlockRangeUpdate => { - log_peer_debug(&established_state.node, &format!("Block Range Update")); + log_peer_debug(&established_state.node, "Block Range Update"); let _ = handle_block_range_update(&mut established_state).await; } } @@ -788,7 +788,7 @@ async fn handle_peer_message(state: &mut Established, message: Message) -> Resul .backend_channel .as_mut() // TODO: this unwrap() is temporary, until we fix the backend process to use spawned - .unwrap() + .expect("Backend channel is not available") .send(message) .await? } From 1a0942bca975f70c602e672b06b2755b1c9a849c Mon Sep 17 00:00:00 2001 From: Esteban Dimitroff Hodi Date: Wed, 25 Jun 2025 16:13:23 -0300 Subject: [PATCH 12/40] Fixed linter issues --- crates/l2/sequencer/block_producer.rs | 4 +- crates/l2/sequencer/proof_coordinator.rs | 71 +++++++++++++----------- 2 files changed, 41 insertions(+), 34 deletions(-) diff --git a/crates/l2/sequencer/block_producer.rs b/crates/l2/sequencer/block_producer.rs index c5130a3a2e..7273157c7a 100644 --- a/crates/l2/sequencer/block_producer.rs +++ b/crates/l2/sequencer/block_producer.rs @@ -129,11 +129,11 @@ impl GenServer for BlockProducer { &mut self, _message: Self::CastMsg, handle: &GenServerHandle, - mut state: Self::State, + state: Self::State, ) -> CastResponse { // Right now we only have the Produce message, so we ignore the message if let SequencerStatus::Sequencing = state.sequencer_state.status().await { - let _ = produce_block(&mut state) + let _ = produce_block(&state) .await .inspect_err(|e| error!("Block Producer Error: {e}")); } diff --git a/crates/l2/sequencer/proof_coordinator.rs b/crates/l2/sequencer/proof_coordinator.rs index f16e4e2b03..b19f43b4b2 100644 --- a/crates/l2/sequencer/proof_coordinator.rs +++ b/crates/l2/sequencer/proof_coordinator.rs @@ -380,41 +380,48 @@ async fn handle_connection( ) -> Result<(), ProofCoordinatorError> { let mut buffer = Vec::new(); // TODO: This should be fixed in https://github.com/lambdaclass/ethrex/issues/3316 - let mut stream = Arc::try_unwrap(stream).unwrap(); - stream.read_to_end(&mut buffer).await?; - - let data: Result = serde_json::from_slice(&buffer); - match data { - Ok(ProofData::BatchRequest) => { - if let Err(e) = handle_request(state, &mut stream).await { - error!("Failed to handle BatchRequest: {e}"); - } - } - Ok(ProofData::ProofSubmit { - batch_number, - batch_proof, - }) => { - if let Err(e) = handle_submit(&mut 
stream, batch_number, batch_proof).await { - error!("Failed to handle ProofSubmit: {e}"); - } + // (stream should not be wrapped in an Arc) + match Arc::try_unwrap(stream) { + Err(_) => { + error!("Unable to use stream"); } - Ok(ProofData::ProverSetup { - prover_type, - payload, - }) => { - if let Err(e) = handle_setup(state, &mut stream, prover_type, payload).await { - error!("Failed to handle ProverSetup: {e}"); + Ok(mut stream) => { + stream.read_to_end(&mut buffer).await?; + + let data: Result = serde_json::from_slice(&buffer); + match data { + Ok(ProofData::BatchRequest) => { + if let Err(e) = handle_request(state, &mut stream).await { + error!("Failed to handle BatchRequest: {e}"); + } + } + Ok(ProofData::ProofSubmit { + batch_number, + batch_proof, + }) => { + if let Err(e) = handle_submit(&mut stream, batch_number, batch_proof).await { + error!("Failed to handle ProofSubmit: {e}"); + } + } + Ok(ProofData::ProverSetup { + prover_type, + payload, + }) => { + if let Err(e) = handle_setup(state, &mut stream, prover_type, payload).await { + error!("Failed to handle ProverSetup: {e}"); + } + } + Err(e) => { + warn!("Failed to parse request: {e}"); + } + _ => { + warn!("Invalid request"); + } } - } - Err(e) => { - warn!("Failed to parse request: {e}"); - } - _ => { - warn!("Invalid request"); - } - } - debug!("Connection closed"); + debug!("Connection closed"); + } + }; Ok(()) } From 30fc9c466c362fa081007c14e42405135f520f76 Mon Sep 17 00:00:00 2001 From: Esteban Dimitroff Hodi Date: Wed, 25 Jun 2025 18:12:09 -0300 Subject: [PATCH 13/40] Fixed failing test on discv4 --- .../p2p/rlpx/connection/handshake.rs | 24 ++++++++++++------- .../networking/p2p/rlpx/connection/server.rs | 21 +++------------- 2 files changed, 18 insertions(+), 27 deletions(-) diff --git a/crates/networking/p2p/rlpx/connection/handshake.rs b/crates/networking/p2p/rlpx/connection/handshake.rs index c5b0de5b82..a26b8f06ca 100644 --- a/crates/networking/p2p/rlpx/connection/handshake.rs +++ b/crates/networking/p2p/rlpx/connection/handshake.rs @@ -1,5 +1,6 @@ use std::{ collections::{HashMap, HashSet}, + net::SocketAddr, sync::Arc, }; @@ -32,7 +33,7 @@ use rand::Rng; use sha3::{Digest, Keccak256}; use tokio::{ io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}, - net::TcpStream, + net::{TcpSocket, TcpStream}, sync::Mutex, }; use tokio_util::codec::Framed; @@ -64,14 +65,15 @@ pub(crate) async fn perform( state: InnerState, ) -> Result<(Established, SplitStream>), RLPxError> { let (context, node, framed, inbound) = match state { - InnerState::Initiator(Initiator { - context, - node, - stream, - }) => { - let mut stream = match Arc::try_unwrap(stream) { - Ok(s) => s, - Err(_) => return Err(RLPxError::StateError("Cannot use the stream".to_string())), + InnerState::Initiator(Initiator { context, node }) => { + let addr = SocketAddr::new(node.ip, node.tcp_port); + let mut stream = match tcp_stream(addr).await { + Ok(result) => result, + Err(error) => { + log_peer_debug(&node, &format!("Error creating tcp connection {error}")); + context.table.lock().await.replace_peer(node.node_id()); + return Err(error)?; + } }; let local_state = send_auth(&context.signer, node.public_key, &mut stream).await?; let remote_state = receive_ack(&context.signer, node.public_key, &mut stream).await?; @@ -136,6 +138,10 @@ pub(crate) async fn perform( )) } +async fn tcp_stream(addr: SocketAddr) -> Result { + TcpSocket::new_v4()?.connect(addr).await +} + async fn send_auth( signer: &SigningKey, remote_public_key: H512, diff --git 
a/crates/networking/p2p/rlpx/connection/server.rs b/crates/networking/p2p/rlpx/connection/server.rs index c377533317..780553e65f 100644 --- a/crates/networking/p2p/rlpx/connection/server.rs +++ b/crates/networking/p2p/rlpx/connection/server.rs @@ -18,7 +18,7 @@ use spawned_concurrency::tasks::{ CallResponse, CastResponse, GenServer, GenServerHandle, send_interval, }; use tokio::{ - net::{TcpSocket, TcpStream}, + net::TcpStream, sync::{Mutex, broadcast, mpsc::Sender}, task, }; @@ -71,7 +71,6 @@ pub struct RLPxConnectionState(pub InnerState); pub struct Initiator { pub(crate) context: P2PContext, pub(crate) node: Node, - pub(crate) stream: Arc, } #[derive(Clone)] @@ -125,11 +124,10 @@ impl RLPxConnectionState { })) } - pub fn new_as_initiator(context: P2PContext, node: &Node, stream: TcpStream) -> Self { + pub fn new_as_initiator(context: P2PContext, node: &Node) -> Self { Self(InnerState::Initiator(Initiator { context, node: node.clone(), - stream: Arc::new(stream), })) } } @@ -176,16 +174,7 @@ impl RLPxConnection { context: P2PContext, node: &Node, ) -> Result { - let addr = SocketAddr::new(node.ip, node.tcp_port); - let stream = match tcp_stream(addr).await { - Ok(result) => result, - Err(error) => { - log_peer_debug(node, &format!("Error creating tcp connection {error}")); - context.table.lock().await.replace_peer(node.node_id()); - return Err(error); - } - }; - let state = RLPxConnectionState::new_as_initiator(context, node, stream); + let state = RLPxConnectionState::new_as_initiator(context, node); Ok(RLPxConnection::start(state.clone())) } } @@ -337,10 +326,6 @@ impl GenServer for RLPxConnection { } } -async fn tcp_stream(addr: SocketAddr) -> Result { - TcpSocket::new_v4()?.connect(addr).await -} - async fn send_new_pooled_tx_hashes(state: &mut Established) -> Result<(), RLPxError> { if SUPPORTED_ETH_CAPABILITIES .iter() From 0c0b7ef7d3702ada6b7af2b8b713bdd180a89b5c Mon Sep 17 00:00:00 2001 From: Esteban Dimitroff Hodi Date: Wed, 25 Jun 2025 21:19:22 -0300 Subject: [PATCH 14/40] Breaking listen loop when stream is finished --- crates/networking/p2p/rlpx/connection/server.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/crates/networking/p2p/rlpx/connection/server.rs b/crates/networking/p2p/rlpx/connection/server.rs index 780553e65f..cf5dd6ea4c 100644 --- a/crates/networking/p2p/rlpx/connection/server.rs +++ b/crates/networking/p2p/rlpx/connection/server.rs @@ -614,7 +614,10 @@ where log_peer_debug(&node, &format!("Received RLPX Error in msg {}", e)); } }, - None => todo!(), + None => { + // stream is finished, no need to keep looping + break; + } }; } }); From a084f8e880d979f455ab3d304a1e1a40efe9ac90 Mon Sep 17 00:00:00 2001 From: Esteban Dimitroff Hodi Date: Thu, 26 Jun 2025 08:42:55 -0300 Subject: [PATCH 15/40] Improved listen loop syntax --- .../networking/p2p/rlpx/connection/server.rs | 22 +++++++------------ 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/crates/networking/p2p/rlpx/connection/server.rs b/crates/networking/p2p/rlpx/connection/server.rs index cf5dd6ea4c..90b05a2d0c 100644 --- a/crates/networking/p2p/rlpx/connection/server.rs +++ b/crates/networking/p2p/rlpx/connection/server.rs @@ -604,21 +604,15 @@ where { let node = node.clone(); spawned_rt::tasks::spawn(async move { - loop { - match stream.next().await { - Some(message) => match message { - Ok(message) => { - let _ = conn.cast(CastMessage::PeerMessage(message)).await; - } - Err(e) => { - log_peer_debug(&node, &format!("Received RLPX Error in msg {}", e)); - } - }, - None => 
{ - // stream is finished, no need to keep looping - break; + while let Some(message) = stream.next().await { + match message { + Ok(message) => { + let _ = conn.cast(CastMessage::PeerMessage(message)).await; } - }; + Err(e) => { + log_peer_debug(&node, &format!("Received RLPX Error in msg {}", e)); + } + } } }); } From 46d54065d0888567a805c17767f33442b41ad5b7 Mon Sep 17 00:00:00 2001 From: Esteban Dimitroff Hodi Date: Thu, 26 Jun 2025 09:31:10 -0300 Subject: [PATCH 16/40] Improved error handling on connection initialization --- .../networking/p2p/rlpx/connection/server.rs | 127 ++++++++++-------- 1 file changed, 69 insertions(+), 58 deletions(-) diff --git a/crates/networking/p2p/rlpx/connection/server.rs b/crates/networking/p2p/rlpx/connection/server.rs index 90b05a2d0c..a0b339af65 100644 --- a/crates/networking/p2p/rlpx/connection/server.rs +++ b/crates/networking/p2p/rlpx/connection/server.rs @@ -195,69 +195,18 @@ impl GenServer for RLPxConnection { handle: &GenServerHandle, mut state: Self::State, ) -> Result { - let (mut established_state, mut stream) = handshake::perform(state.0).await?; + let (mut established_state, stream) = handshake::perform(state.0).await?; log_peer_debug(&established_state.node, "Starting RLPx connection"); - if let Err(reason) = post_handshake_checks(established_state.table.clone()).await { + + if let Err(reason) = initialize_connection(handle, &mut established_state, stream).await { connection_failed( &mut established_state, - "Post handshake validations failed", - RLPxError::DisconnectSent(reason), + "Failed to initialize RLPx connection", + reason, ) .await; - return Err(RLPxError::Disconnected()); - } - - if let Err(e) = exchange_hello_messages(&mut established_state, &mut stream).await { - connection_failed(&mut established_state, "Hello messages exchange failed", e).await; Err(RLPxError::Disconnected()) } else { - // Handshake OK: handle connection - // Create channels to communicate directly to the peer - let (peer_channels, sender) = PeerChannels::create(handle.clone()); - - // Updating the state to establish the backend channel - established_state.backend_channel = Some(sender); - - // NOTE: if the peer came from the discovery server it will already be inserted in the table - // but that might not always be the case, so we try to add it to the table - // Note: we don't ping the node we let the validation service do its job - { - let mut table_lock = established_state.table.lock().await; - table_lock.insert_node_forced(established_state.node.clone()); - table_lock.init_backend_communication( - established_state.node.node_id(), - peer_channels, - established_state.capabilities.clone(), - established_state.inbound, - ); - } - init_peer_conn(&mut established_state, &mut stream).await?; - log_peer_debug(&established_state.node, "Peer connection initialized."); - - // Send transactions transaction hashes from mempool at connection start - send_new_pooled_tx_hashes(&mut established_state).await?; - - // Periodic broadcast check repeated events. - send_interval( - TX_BROADCAST_INTERVAL, - handle.clone(), - CastMessage::SendNewPooledTxHashes, - ); - - // Periodic Pings repeated events. - send_interval(PING_INTERVAL, handle.clone(), CastMessage::SendPing); - - // Periodic block range update. 
- send_interval( - BLOCK_RANGE_UPDATE_INTERVAL, - handle.clone(), - CastMessage::BlockRangeUpdate, - ); - - spawn_listener(handle.clone(), &established_state.node, stream); - - spawn_broadcast_listener(handle.clone(), &mut established_state); - // New state state.0 = InnerState::Established(established_state); Ok(state) @@ -326,6 +275,68 @@ impl GenServer for RLPxConnection { } } +async fn initialize_connection( + handle: &RLPxConnectionHandle, + state: &mut Established, + mut stream: S, +) -> Result<(), RLPxError> +where + S: Unpin + Send + Stream> + 'static, +{ + post_handshake_checks(state.table.clone()).await?; + + exchange_hello_messages(state, &mut stream).await?; + + // Handshake OK: handle connection + // Create channels to communicate directly to the peer + let (peer_channels, sender) = PeerChannels::create(handle.clone()); + + // Updating the state to establish the backend channel + state.backend_channel = Some(sender); + + // NOTE: if the peer came from the discovery server it will already be inserted in the table + // but that might not always be the case, so we try to add it to the table + // Note: we don't ping the node we let the validation service do its job + { + let mut table_lock = state.table.lock().await; + table_lock.insert_node_forced(state.node.clone()); + table_lock.init_backend_communication( + state.node.node_id(), + peer_channels, + state.capabilities.clone(), + state.inbound, + ); + } + init_peer_conn(state, &mut stream).await?; + log_peer_debug(&state.node, "Peer connection initialized."); + + // Send transactions transaction hashes from mempool at connection start + send_new_pooled_tx_hashes(state).await?; + + // Periodic broadcast check repeated events. + send_interval( + TX_BROADCAST_INTERVAL, + handle.clone(), + CastMessage::SendNewPooledTxHashes, + ); + + // Periodic Pings repeated events. + send_interval(PING_INTERVAL, handle.clone(), CastMessage::SendPing); + + // Periodic block range update. 
+ send_interval( + BLOCK_RANGE_UPDATE_INTERVAL, + handle.clone(), + CastMessage::BlockRangeUpdate, + ); + + spawn_listener(handle.clone(), &state.node, stream); + + spawn_broadcast_listener(handle.clone(), state); + + Ok(()) +} + async fn send_new_pooled_tx_hashes(state: &mut Established) -> Result<(), RLPxError> { if SUPPORTED_ETH_CAPABILITIES .iter() @@ -426,7 +437,7 @@ where async fn post_handshake_checks( table: Arc>, -) -> Result<(), DisconnectReason> { +) -> Result<(), RLPxError> { // Check if connected peers exceed the limit let peer_count = { let table_lock = table.lock().await; @@ -434,7 +445,7 @@ async fn post_handshake_checks( }; if peer_count >= MAX_PEERS_TCP_CONNECTIONS { - return Err(DisconnectReason::TooManyPeers); + return Err(RLPxError::DisconnectSent(DisconnectReason::TooManyPeers)); } Ok(()) From 4003852365d7eaebcd0edc382431bb206d3a2a18 Mon Sep 17 00:00:00 2001 From: Esteban Dimitroff Hodi Date: Thu, 26 Jun 2025 09:47:35 -0300 Subject: [PATCH 17/40] updated spawned use in metrics module --- crates/l2/sequencer/metrics.rs | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/crates/l2/sequencer/metrics.rs b/crates/l2/sequencer/metrics.rs index 0dc6d97bdc..58ea4d2b9a 100644 --- a/crates/l2/sequencer/metrics.rs +++ b/crates/l2/sequencer/metrics.rs @@ -4,8 +4,9 @@ use ethereum_types::Address; use ethrex_metrics::metrics_l2::{METRICS_L2, MetricsL2BlockType, MetricsL2OperationType}; use ethrex_metrics::metrics_transactions::METRICS_TX; use ethrex_rpc::clients::eth::EthClient; -use spawned_concurrency::{CallResponse, CastResponse, GenServer, GenServerInMsg, send_after}; -use spawned_rt::mpsc::Sender; +use spawned_concurrency::tasks::{ + CallResponse, CastResponse, GenServer, GenServerInMsg, send_after, +}; use std::time::Duration; use tracing::{debug, error}; @@ -66,7 +67,8 @@ impl MetricsGatherer { } impl GenServer for MetricsGatherer { - type InMsg = InMessage; + type CallMsg = (); + type CastMsg = InMessage; type OutMsg = OutMessage; type State = MetricsGathererState; @@ -78,24 +80,24 @@ impl GenServer for MetricsGatherer { async fn handle_call( &mut self, - _message: Self::InMsg, - _tx: &Sender>, - _state: &mut Self::State, - ) -> CallResponse { - CallResponse::Reply(OutMessage::Done) + _message: Self::CallMsg, + _handle: &GenServerHandle, + state: Self::State, + ) -> CallResponse { + CallResponse::Reply(state, OutMessage::Done) } async fn handle_cast( &mut self, - _message: Self::InMsg, - tx: &Sender>, - state: &mut Self::State, - ) -> CastResponse { + _message: Self::CastMsg, + handle: &GenServerHandle, + state: Self::State, + ) -> CastResponse { // Right now we only have the Gather message, so we ignore the message let _ = gather_metrics(state) .await .inspect_err(|err| error!("Metrics Gatherer Error: {}", err)); - send_after(state.check_interval, tx.clone(), Self::InMsg::Gather); + send_after(state.check_interval, handle.clone(), Self::CastMsg::Gather); CastResponse::NoReply } } From 0dcd1ab80950493ea0555986e4f3dc321ad5fc8c Mon Sep 17 00:00:00 2001 From: Esteban Dimitroff Hodi Date: Thu, 26 Jun 2025 09:57:56 -0300 Subject: [PATCH 18/40] updated spawned use in metrics module --- crates/l2/sequencer/metrics.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/crates/l2/sequencer/metrics.rs b/crates/l2/sequencer/metrics.rs index 58ea4d2b9a..09395cb78b 100644 --- a/crates/l2/sequencer/metrics.rs +++ b/crates/l2/sequencer/metrics.rs @@ -5,7 +5,7 @@ use ethrex_metrics::metrics_l2::{METRICS_L2, 
MetricsL2BlockType, MetricsL2Operat use ethrex_metrics::metrics_transactions::METRICS_TX; use ethrex_rpc::clients::eth::EthClient; use spawned_concurrency::tasks::{ - CallResponse, CastResponse, GenServer, GenServerInMsg, send_after, + CallResponse, CastResponse, GenServer, GenServerHandle, send_after, }; use std::time::Duration; use tracing::{debug, error}; @@ -38,6 +38,7 @@ impl MetricsGathererState { } } +#[derive(Clone)] pub enum InMessage { Gather, } @@ -91,14 +92,14 @@ impl GenServer for MetricsGatherer { &mut self, _message: Self::CastMsg, handle: &GenServerHandle, - state: Self::State, + mut state: Self::State, ) -> CastResponse { // Right now we only have the Gather message, so we ignore the message - let _ = gather_metrics(state) + let _ = gather_metrics(&mut state) .await .inspect_err(|err| error!("Metrics Gatherer Error: {}", err)); send_after(state.check_interval, handle.clone(), Self::CastMsg::Gather); - CastResponse::NoReply + CastResponse::NoReply(state) } } From 99db63e9c95c023e78c79e0dbbc01cc334bb18be Mon Sep 17 00:00:00 2001 From: Esteban Dimitroff Hodi Date: Thu, 26 Jun 2025 11:24:52 -0300 Subject: [PATCH 19/40] emptying service.nix spawned sha to obtain a new one from CI --- crates/l2/tee/quote-gen/service.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/l2/tee/quote-gen/service.nix b/crates/l2/tee/quote-gen/service.nix index ccae0408ea..a0e7f97612 100644 --- a/crates/l2/tee/quote-gen/service.nix +++ b/crates/l2/tee/quote-gen/service.nix @@ -20,7 +20,7 @@ let lockFile = ./Cargo.lock; outputHashes = { "bls12_381-0.8.0" = "sha256-8/pXRA7hVAPeMKCZ+PRPfQfxqstw5Ob4MJNp85pv5WQ="; - "spawned-concurrency-0.1.0" = "sha256-/RO23J4c1fNVpF6ZgHdVPp3C2mgpg+dCwLjg0JcZ0YI="; + "spawned-concurrency-0.1.0" = ""; "aligned-sdk-0.1.0" = "sha256-Az97VtggdN4gsYds3myezNJ+mNeSaIDbF0Pq5kq2M3M="; "lambdaworks-crypto-0.12.0" = "sha256-4vgW/O85zVLhhFrcZUwcPjavy/rRWB8LGTabAkPNrDw="; }; From a292833c4aaa53dcc390fd65927b29ee1be85103 Mon Sep 17 00:00:00 2001 From: Esteban Dimitroff Hodi Date: Thu, 26 Jun 2025 12:05:03 -0300 Subject: [PATCH 20/40] updated spawned in Cargo.lock --- crates/l2/tee/quote-gen/Cargo.lock | 938 ++++++----------------------- 1 file changed, 176 insertions(+), 762 deletions(-) diff --git a/crates/l2/tee/quote-gen/Cargo.lock b/crates/l2/tee/quote-gen/Cargo.lock index 5d1d03eb26..b250415d2f 100644 --- a/crates/l2/tee/quote-gen/Cargo.lock +++ b/crates/l2/tee/quote-gen/Cargo.lock @@ -44,23 +44,11 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "cipher", "cpufeatures", ] -[[package]] -name = "ahash" -version = "0.8.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" -dependencies = [ - "cfg-if", - "once_cell", - "version_check", - "zerocopy 0.8.25", -] - [[package]] name = "aho-corasick" version = "1.1.3" @@ -92,12 +80,6 @@ dependencies = [ "url", ] -[[package]] -name = "allocator-api2" -version = "0.2.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" - [[package]] name = "alloy-consensus" version = "0.11.1" @@ -218,7 +200,7 @@ checksum = "8c77490fe91a0ce933a1f219029521f20fc28c2c0ca95d53fa4da9c00b8d9d4e" dependencies = [ "alloy-rlp", "bytes", - "cfg-if", + "cfg-if 1.0.0", "const-hex", "derive_more 
2.0.1", "foldhash", @@ -231,7 +213,7 @@ dependencies = [ "proptest", "rand 0.8.5", "ruint", - "rustc-hash 2.1.1", + "rustc-hash", "serde", "sha3", "tiny-keccak", @@ -610,28 +592,6 @@ dependencies = [ "term", ] -[[package]] -name = "async-stream" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" -dependencies = [ - "async-stream-impl", - "futures-core", - "pin-project-lite", -] - -[[package]] -name = "async-stream-impl" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.101", -] - [[package]] name = "async-trait" version = "0.1.88" @@ -687,41 +647,13 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" -[[package]] -name = "axum" -version = "0.6.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" -dependencies = [ - "async-trait", - "axum-core 0.3.4", - "bitflags 1.3.2", - "bytes", - "futures-util", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.32", - "itoa", - "matchit 0.7.3", - "memchr", - "mime", - "percent-encoding", - "pin-project-lite", - "rustversion", - "serde", - "sync_wrapper 0.1.2", - "tower 0.4.13", - "tower-layer", - "tower-service", -] - [[package]] name = "axum" version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "021e862c184ae977658b36c4500f7feac3221ca5da43e3f25bd04ab6c79a29b5" dependencies = [ - "axum-core 0.5.2", + "axum-core", "bytes", "form_urlencoded", "futures-util", @@ -731,7 +663,7 @@ dependencies = [ "hyper 1.6.0", "hyper-util", "itoa", - "matchit 0.8.4", + "matchit", "memchr", "mime", "percent-encoding", @@ -743,29 +675,12 @@ dependencies = [ "serde_urlencoded", "sync_wrapper 1.0.2", "tokio", - "tower 0.5.2", + "tower", "tower-layer", "tower-service", "tracing", ] -[[package]] -name = "axum-core" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" -dependencies = [ - "async-trait", - "bytes", - "futures-util", - "http 0.2.12", - "http-body 0.4.6", - "mime", - "rustversion", - "tower-layer", - "tower-service", -] - [[package]] name = "axum-core" version = "0.5.2" @@ -792,8 +707,8 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "45bf463831f5131b7d3c756525b305d40f1185b688565648a92e1392ca35713d" dependencies = [ - "axum 0.8.4", - "axum-core 0.5.2", + "axum", + "axum-core", "bytes", "futures-util", "headers", @@ -804,7 +719,7 @@ dependencies = [ "pin-project-lite", "rustversion", "serde", - "tower 0.5.2", + "tower", "tower-layer", "tower-service", ] @@ -816,7 +731,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002" dependencies = [ "addr2line", - "cfg-if", + "cfg-if 1.0.0", "libc", "miniz_oxide", "object", @@ -869,29 +784,6 @@ dependencies = [ "serde", ] -[[package]] -name = "bindgen" -version = "0.66.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2b84e06fc203107bfbad243f4aba2af864eb7db3b1cf46ea0a023b0b433d2a7" -dependencies = [ - "bitflags 2.9.1", - 
"cexpr", - "clang-sys", - "lazy_static", - "lazycell", - "log", - "peeking_take_while", - "prettyplease", - "proc-macro2", - "quote", - "regex", - "rustc-hash 1.1.0", - "shlex", - "syn 2.0.101", - "which", -] - [[package]] name = "bit-set" version = "0.5.3" @@ -959,7 +851,7 @@ dependencies = [ "arrayref", "arrayvec", "cc", - "cfg-if", + "cfg-if 1.0.0", "constant_time_eq 0.3.1", ] @@ -972,15 +864,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "block-padding" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8894febbff9f758034a5b8e12d87918f56dfc64a8e1fe757d65e29041538d93" -dependencies = [ - "generic-array", -] - [[package]] name = "bls12_381" version = "0.8.0" @@ -1110,15 +993,6 @@ dependencies = [ "thiserror 1.0.69", ] -[[package]] -name = "cbc" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b52a9543ae338f279b96b0b9fed9c8093744685043739079ce85cd58f289a6" -dependencies = [ - "cipher", -] - [[package]] name = "cc" version = "1.2.26" @@ -1131,13 +1005,10 @@ dependencies = [ ] [[package]] -name = "cexpr" -version = "0.6.0" +name = "cfg-if" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" -dependencies = [ - "nom", -] +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" [[package]] name = "cfg-if" @@ -1153,10 +1024,8 @@ checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" dependencies = [ "android-tzdata", "iana-time-zone", - "js-sys", "num-traits", "serde", - "wasm-bindgen", "windows-link", ] @@ -1197,17 +1066,6 @@ dependencies = [ "inout", ] -[[package]] -name = "clang-sys" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" -dependencies = [ - "glob", - "libc", - "libloading", -] - [[package]] name = "clap" version = "4.5.39" @@ -1340,7 +1198,7 @@ version = "1.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83e22e0ed40b96a48d3db274f72fd365bd78f67af39b6bbd47e8a15e1c6207ff" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "cpufeatures", "hex", "proptest", @@ -1440,7 +1298,42 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", +] + +[[package]] +name = "crossbeam" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69323bff1fb41c635347b8ead484a5ca6c3f11914d784170b158d8449ab07f8e" +dependencies = [ + "cfg-if 0.1.10", + "crossbeam-channel", + "crossbeam-deque 0.7.4", + "crossbeam-epoch 0.8.2", + "crossbeam-queue", + "crossbeam-utils 0.7.2", +] + +[[package]] +name = "crossbeam-channel" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87" +dependencies = [ + "crossbeam-utils 0.7.2", + "maybe-uninit", +] + +[[package]] +name = "crossbeam-deque" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c20ff29ded3204c5106278a81a38f4b482636ed4fa1e6cfbeef193291beb29ed" +dependencies = [ + "crossbeam-epoch 0.8.2", + "crossbeam-utils 0.7.2", + "maybe-uninit", ] [[package]] @@ -1449,8 +1342,23 @@ version = "0.8.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" dependencies = [ - "crossbeam-epoch", - "crossbeam-utils", + "crossbeam-epoch 0.9.18", + "crossbeam-utils 0.8.21", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" +dependencies = [ + "autocfg", + "cfg-if 0.1.10", + "crossbeam-utils 0.7.2", + "lazy_static", + "maybe-uninit", + "memoffset", + "scopeguard", ] [[package]] @@ -1459,7 +1367,29 @@ version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ - "crossbeam-utils", + "crossbeam-utils 0.8.21", +] + +[[package]] +name = "crossbeam-queue" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" +dependencies = [ + "cfg-if 0.1.10", + "crossbeam-utils 0.7.2", + "maybe-uninit", +] + +[[package]] +name = "crossbeam-utils" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" +dependencies = [ + "autocfg", + "cfg-if 0.1.10", + "lazy_static", ] [[package]] @@ -1691,7 +1621,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "dirs-sys-next", ] @@ -1803,7 +1733,7 @@ version = "0.8.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", ] [[package]] @@ -1885,7 +1815,7 @@ dependencies = [ "sha2", "sha3", "thiserror 1.0.69", - "uuid 0.8.2", + "uuid", ] [[package]] @@ -2186,7 +2116,7 @@ version = "2.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "66244a771d9163282646dbeffe0e6eca4dda4146b6498644e678ac6089b11edd" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "const-hex", "dirs", "dunce", @@ -2217,16 +2147,18 @@ name = "ethrex-blockchain" version = "0.1.0" dependencies = [ "bytes", - "cfg-if", + "cfg-if 1.0.0", "ethrex-common", "ethrex-metrics", "ethrex-rlp", "ethrex-storage", "ethrex-vm", "k256", + "secp256k1", "sha3", "thiserror 2.0.12", "tokio", + "tokio-util", "tracing", ] @@ -2282,7 +2214,7 @@ dependencies = [ "aligned-sdk", "bincode", "bytes", - "cfg-if", + "cfg-if 1.0.0", "directories", "envy", "ethereum-types 0.15.1", @@ -2397,6 +2329,8 @@ dependencies = [ "serde_json", "sha3", "snap", + "spawned-concurrency", + "spawned-rt", "thiserror 2.0.12", "tokio", "tokio-stream", @@ -2421,10 +2355,10 @@ dependencies = [ name = "ethrex-rpc" version = "0.1.0" dependencies = [ - "axum 0.8.4", + "axum", "axum-extra", "bytes", - "cfg-if", + "cfg-if 1.0.0", "envy", "ethereum-types 0.15.1", "ethrex-blockchain", @@ -2447,7 +2381,7 @@ dependencies = [ "thiserror 2.0.12", "tokio", "tokio-util", - "tower-http 0.6.6", + "tower-http", "tracing", "tracing-subscriber", ] @@ -2481,13 +2415,13 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", + "bincode", "bytes", "ethereum-types 0.15.1", "ethrex-common", "ethrex-rlp", "ethrex-trie", "hex", - "libsql", "serde", "serde_json", "sha3", @@ -2501,12 +2435,14 @@ 
version = "0.1.0" dependencies = [ "anyhow", "async-trait", + "bincode", "ethereum-types 0.15.1", "ethrex-common", "ethrex-rlp", "ethrex-storage", - "libsql", - "tokio", + "ethrex-trie", + "futures", + "thiserror 2.0.12", "tracing", ] @@ -2535,7 +2471,7 @@ version = "0.1.0" dependencies = [ "bincode", "bytes", - "cfg-if", + "cfg-if 1.0.0", "derive_more 1.0.0", "dyn-clone", "ethereum-types 0.15.1", @@ -2564,24 +2500,6 @@ dependencies = [ "once_cell", ] -[[package]] -name = "fallible-iterator" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" - -[[package]] -name = "fallible-iterator" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" - -[[package]] -name = "fallible-streaming-iterator" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" - [[package]] name = "fancy-regex" version = "0.14.0" @@ -2870,7 +2788,7 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", @@ -2883,7 +2801,7 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "libc", "r-efi", "wasi 0.14.2+wasi-0.2.4", @@ -2968,7 +2886,7 @@ version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "crunchy", ] @@ -2978,16 +2896,6 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" -[[package]] -name = "hashbrown" -version = "0.14.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" -dependencies = [ - "ahash", - "allocator-api2", -] - [[package]] name = "hashbrown" version = "0.15.3" @@ -3007,15 +2915,6 @@ dependencies = [ "fxhash", ] -[[package]] -name = "hashlink" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" -dependencies = [ - "hashbrown 0.14.5", -] - [[package]] name = "headers" version = "0.4.1" @@ -3144,12 +3043,6 @@ dependencies = [ "pin-project-lite", ] -[[package]] -name = "http-range-header" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f" - [[package]] name = "httparse" version = "1.10.1" @@ -3221,24 +3114,6 @@ dependencies = [ "tokio-rustls 0.24.1", ] -[[package]] -name = "hyper-rustls" -version = "0.25.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "399c78f9338483cb7e630c8474b07268983c6bd5acee012e4211f9f7bb21b070" -dependencies = [ - "futures-util", - "http 0.2.12", - "hyper 0.14.32", - "log", - "rustls 0.22.4", - "rustls-native-certs", - "rustls-pki-types", - "tokio", - "tokio-rustls 0.25.0", - "webpki-roots 
0.26.11", -] - [[package]] name = "hyper-rustls" version = "0.27.7" @@ -3255,18 +3130,6 @@ dependencies = [ "tower-service", ] -[[package]] -name = "hyper-timeout" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" -dependencies = [ - "hyper 0.14.32", - "pin-project-lite", - "tokio", - "tokio-io-timeout", -] - [[package]] name = "hyper-tls" version = "0.6.0" @@ -3545,7 +3408,6 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "879f10e63c20629ecabbb64a8010319738c66a5cd0c29b02d63d272b03751d01" dependencies = [ - "block-padding", "generic-array", ] @@ -3555,7 +3417,7 @@ version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", ] [[package]] @@ -3686,7 +3548,7 @@ version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "ecdsa", "elliptic-curve", "once_cell", @@ -3811,28 +3673,12 @@ dependencies = [ "spin 0.9.8", ] -[[package]] -name = "lazycell" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" - [[package]] name = "libc" version = "0.2.172" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" -[[package]] -name = "libloading" -version = "0.8.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667" -dependencies = [ - "cfg-if", - "windows-targets 0.48.5", -] - [[package]] name = "libm" version = "0.2.15" @@ -3850,167 +3696,26 @@ dependencies = [ ] [[package]] -name = "libsql" -version = "0.9.10" +name = "libtest-mimic" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c9f529b1afe0465e1f864fa34cdcad08fb463a1bf86b40267493db83508ee9d" +checksum = "5297962ef19edda4ce33aaa484386e0a5b3d7f2f4e037cbeee00503ef6b29d33" dependencies = [ - "anyhow", - "async-stream", - "async-trait", - "base64 0.21.7", - "bincode", - "bitflags 2.9.1", - "bytes", - "chrono", - "crc32fast", - "fallible-iterator 0.3.0", - "futures", - "http 0.2.12", - "hyper 0.14.32", - "hyper-rustls 0.25.0", - "libsql-hrana", - "libsql-sqlite3-parser", - "libsql-sys", - "libsql_replication", - "parking_lot", - "serde", - "serde_json", - "thiserror 1.0.69", - "tokio", - "tokio-stream", - "tokio-util", - "tonic", - "tonic-web", - "tower 0.4.13", - "tower-http 0.4.4", - "tracing", - "uuid 1.17.0", - "zerocopy 0.7.35", + "anstream", + "anstyle", + "clap", + "escape8259", ] [[package]] -name = "libsql-ffi" -version = "0.9.10" +name = "linux-raw-sys" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "739717a55160a200ef8a9122a17d559148ddd3f0c526b52b3908f9a80aca5284" -dependencies = [ - "bindgen", - "cc", - "glob", -] +checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" [[package]] -name = "libsql-hrana" -version = "0.9.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44633324952c32e3ab7da5e5cf36af9d7afe53e74f811e622ef23207b357ff18" -dependencies = [ - 
"base64 0.21.7", - "bytes", - "prost", - "serde", -] - -[[package]] -name = "libsql-rusqlite" -version = "0.9.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d042d19c09bb858a4ca93c6ae346a8267215301e265097c2858a1c91f6384bd" -dependencies = [ - "bitflags 2.9.1", - "fallible-iterator 0.2.0", - "fallible-streaming-iterator", - "hashlink", - "libsql-ffi", - "smallvec", -] - -[[package]] -name = "libsql-sqlite3-parser" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15a90128c708356af8f7d767c9ac2946692c9112b4f74f07b99a01a60680e413" -dependencies = [ - "bitflags 2.9.1", - "cc", - "fallible-iterator 0.3.0", - "indexmap 2.9.0", - "log", - "memchr", - "phf", - "phf_codegen", - "phf_shared", - "uncased", -] - -[[package]] -name = "libsql-sys" -version = "0.9.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3760d2141b2ac78a24c303c53ee0720301bbbe5158db3fc016c26f3eeeb44712" -dependencies = [ - "bytes", - "libsql-ffi", - "libsql-rusqlite", - "once_cell", - "tracing", - "zerocopy 0.7.35", -] - -[[package]] -name = "libsql_replication" -version = "0.9.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "039dccb52999803f36bc850be4e2ebc6987d1fe622994df1678b87c849c9dca1" -dependencies = [ - "aes", - "async-stream", - "async-trait", - "bytes", - "cbc", - "libsql-rusqlite", - "libsql-sys", - "parking_lot", - "prost", - "serde", - "thiserror 1.0.69", - "tokio", - "tokio-stream", - "tokio-util", - "tonic", - "tracing", - "uuid 1.17.0", - "zerocopy 0.7.35", -] - -[[package]] -name = "libtest-mimic" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5297962ef19edda4ce33aaa484386e0a5b3d7f2f4e037cbeee00503ef6b29d33" -dependencies = [ - "anstream", - "anstyle", - "clap", - "escape8259", -] - -[[package]] -name = "linux-raw-sys" -version = "0.4.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" - -[[package]] -name = "linux-raw-sys" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" - -[[package]] -name = "litemap" -version = "0.8.0" +name = "litemap" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" @@ -4052,15 +3757,15 @@ dependencies = [ [[package]] name = "matchit" -version = "0.7.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" +checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" [[package]] -name = "matchit" -version = "0.8.4" +name = "maybe-uninit" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" +checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" [[package]] name = "md-5" @@ -4068,7 +3773,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "digest 0.10.7", ] @@ -4079,16 +3784,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] -name = "mime" -version = "0.3.17" +name = "memoffset" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" +checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" +dependencies = [ + "autocfg", +] [[package]] -name = "minimal-lexical" -version = "0.2.1" +name = "mime" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "miniz_oxide" @@ -4133,16 +3841,6 @@ version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "650eef8c711430f1a879fdd01d4745a7deea475becfb90269c06775983bbf086" -[[package]] -name = "nom" -version = "7.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" -dependencies = [ - "memchr", - "minimal-lexical", -] - [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -4339,7 +4037,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8" dependencies = [ "bitflags 2.9.1", - "cfg-if", + "cfg-if 1.0.0", "foreign-types", "libc", "once_cell", @@ -4565,7 +4263,7 @@ version = "0.9.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "libc", "redox_syscall", "smallvec", @@ -4617,12 +4315,6 @@ dependencies = [ "hmac", ] -[[package]] -name = "peeking_take_while" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" - [[package]] name = "pem" version = "1.1.1" @@ -4698,16 +4390,6 @@ dependencies = [ "phf_shared", ] -[[package]] -name = "phf_codegen" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aef8048c789fa5e851558d709946d6d79a8ff88c0440c587967f8e94bfb1216a" -dependencies = [ - "phf_generator", - "phf_shared", -] - [[package]] name = "phf_generator" version = "0.11.3" @@ -4738,7 +4420,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" dependencies = [ "siphasher", - "uncased", ] [[package]] @@ -4810,7 +4491,7 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ - "zerocopy 0.8.25", + "zerocopy", ] [[package]] @@ -4925,29 +4606,6 @@ dependencies = [ "unarray", ] -[[package]] -name = "prost" -version = "0.12.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "deb1435c188b76130da55f17a466d252ff7b1418b2ad3e037d127b94e3411f29" -dependencies = [ - "bytes", - "prost-derive", -] - -[[package]] -name = "prost-derive" -version = "0.12.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" -dependencies = [ - "anyhow", - "itertools 0.10.5", - "proc-macro2", - "quote", - "syn 2.0.101", -] - [[package]] name = "quick-error" 
version = "1.2.3" @@ -4984,7 +4642,7 @@ dependencies = [ "serde", "serde_json", "tokio", - "zerocopy 0.8.25", + "zerocopy", "zkvm_interface", ] @@ -5085,8 +4743,8 @@ version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" dependencies = [ - "crossbeam-deque", - "crossbeam-utils", + "crossbeam-deque 0.8.6", + "crossbeam-utils 0.8.21", ] [[package]] @@ -5177,7 +4835,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "rustls 0.21.12", - "rustls-pemfile 1.0.4", + "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", @@ -5190,7 +4848,7 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "webpki-roots 0.25.4", + "webpki-roots", "winreg", ] @@ -5227,8 +4885,8 @@ dependencies = [ "sync_wrapper 1.0.2", "tokio", "tokio-native-tls", - "tower 0.5.2", - "tower-http 0.6.6", + "tower", + "tower-http", "tower-service", "url", "wasm-bindgen", @@ -5243,7 +4901,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c175ecec83bba464aa8406502fe5bf670491c2ace81a153264891d43bc7fa332" dependencies = [ "auto_impl", - "cfg-if", + "cfg-if 1.0.0", "dyn-clone", "revm-interpreter", "revm-precompile", @@ -5287,7 +4945,7 @@ checksum = "99743c3a2cac341084cc15ac74286c4bf34a0941ebf60aa420cfdb9f81f72f9f" dependencies = [ "aurora-engine-modexp", "c-kzg", - "cfg-if", + "cfg-if 1.0.0", "k256", "once_cell", "revm-primitives", @@ -5310,7 +4968,7 @@ dependencies = [ "bitflags 2.9.1", "bitvec", "c-kzg", - "cfg-if", + "cfg-if 1.0.0", "dyn-clone", "enumn", "hex", @@ -5349,7 +5007,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", - "cfg-if", + "cfg-if 1.0.0", "getrandom 0.2.16", "libc", "untrusted 0.9.0", @@ -5436,12 +5094,6 @@ version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" -[[package]] -name = "rustc-hash" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" - [[package]] name = "rustc-hash" version = "2.1.1" @@ -5472,19 +5124,6 @@ dependencies = [ "semver 1.0.26", ] -[[package]] -name = "rustix" -version = "0.38.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" -dependencies = [ - "bitflags 2.9.1", - "errno", - "libc", - "linux-raw-sys 0.4.15", - "windows-sys 0.52.0", -] - [[package]] name = "rustix" version = "1.0.7" @@ -5494,7 +5133,7 @@ dependencies = [ "bitflags 2.9.1", "errno", "libc", - "linux-raw-sys 0.9.4", + "linux-raw-sys", "windows-sys 0.59.0", ] @@ -5510,20 +5149,6 @@ dependencies = [ "sct", ] -[[package]] -name = "rustls" -version = "0.22.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" -dependencies = [ - "log", - "ring 0.17.14", - "rustls-pki-types", - "rustls-webpki 0.102.8", - "subtle", - "zeroize", -] - [[package]] name = "rustls" version = "0.23.27" @@ -5537,19 +5162,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "rustls-native-certs" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5" 
-dependencies = [ - "openssl-probe", - "rustls-pemfile 2.2.0", - "rustls-pki-types", - "schannel", - "security-framework", -] - [[package]] name = "rustls-pemfile" version = "1.0.4" @@ -5559,15 +5171,6 @@ dependencies = [ "base64 0.21.7", ] -[[package]] -name = "rustls-pemfile" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" -dependencies = [ - "rustls-pki-types", -] - [[package]] name = "rustls-pki-types" version = "1.12.0" @@ -5587,17 +5190,6 @@ dependencies = [ "untrusted 0.9.0", ] -[[package]] -name = "rustls-webpki" -version = "0.102.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" -dependencies = [ - "ring 0.17.14", - "rustls-pki-types", - "untrusted 0.9.0", -] - [[package]] name = "rustls-webpki" version = "0.103.3" @@ -5657,7 +5249,7 @@ version = "2.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "346a3b32eba2640d17a9cb5927056b08f3de90f65b72fe09402c2ad07d684d0b" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "derive_more 1.0.0", "parity-scale-codec", "scale-info-derive", @@ -5927,7 +5519,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "cpufeatures", "digest 0.10.7", ] @@ -5938,7 +5530,7 @@ version = "0.10.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "cpufeatures", "digest 0.10.7", ] @@ -5960,7 +5552,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" dependencies = [ "cc", - "cfg-if", + "cfg-if 1.0.0", ] [[package]] @@ -6088,7 +5680,7 @@ checksum = "6939d6b2f63e54e5fbd208a0293027608f22511741b62fe32b6f67f6c144e0c0" dependencies = [ "bincode", "blake3", - "cfg-if", + "cfg-if 1.0.0", "hex", "lazy_static", "num-bigint 0.4.6", @@ -6106,7 +5698,7 @@ version = "0.8.0-sp1-5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac255e1704ebcdeec5e02f6a0ebc4d2e9e6b802161938330b6810c13a610c583" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "ff", "group", "pairing", @@ -6118,7 +5710,7 @@ dependencies = [ [[package]] name = "spawned-concurrency" version = "0.1.0" -source = "git+https://github.com/lambdaclass/spawned.git?tag=v0.1.0-alpha#abd5476b7cc0feeafd96ca79c6844e87702c7f83" +source = "git+https://github.com/lambdaclass/spawned.git?tag=v0.1.1-alpha#481439d3d147fa741d67bb5ff047f8c68bcefb83" dependencies = [ "futures", "spawned-rt", @@ -6128,9 +5720,11 @@ dependencies = [ [[package]] name = "spawned-rt" version = "0.1.0" -source = "git+https://github.com/lambdaclass/spawned.git?tag=v0.1.0-alpha#abd5476b7cc0feeafd96ca79c6844e87702c7f83" +source = "git+https://github.com/lambdaclass/spawned.git?tag=v0.1.1-alpha#481439d3d147fa741d67bb5ff047f8c68bcefb83" dependencies = [ + "crossbeam", "tokio", + "tokio-util", "tracing", "tracing-subscriber", ] @@ -6365,7 +5959,7 @@ dependencies = [ "fastrand", "getrandom 0.3.3", "once_cell", - "rustix 1.0.7", + "rustix", "windows-sys 0.59.0", ] @@ -6426,7 +6020,7 @@ version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "once_cell", ] @@ -6522,16 +6116,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "tokio-io-timeout" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" -dependencies = [ - "pin-project-lite", - "tokio", -] - [[package]] name = "tokio-macros" version = "2.5.0" @@ -6563,17 +6147,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "tokio-rustls" -version = "0.25.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" -dependencies = [ - "rustls 0.22.4", - "rustls-pki-types", - "tokio", -] - [[package]] name = "tokio-rustls" version = "0.26.2" @@ -6607,7 +6180,7 @@ dependencies = [ "tokio", "tokio-rustls 0.24.1", "tungstenite 0.20.1", - "webpki-roots 0.25.4", + "webpki-roots", ] [[package]] @@ -6680,73 +6253,6 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" -[[package]] -name = "tonic" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76c4eb7a4e9ef9d4763600161f12f5070b92a578e1b634db88a6887844c91a13" -dependencies = [ - "async-stream", - "async-trait", - "axum 0.6.20", - "base64 0.21.7", - "bytes", - "h2 0.3.26", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.32", - "hyper-timeout", - "percent-encoding", - "pin-project", - "prost", - "tokio", - "tokio-stream", - "tower 0.4.13", - "tower-layer", - "tower-service", - "tracing", -] - -[[package]] -name = "tonic-web" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc3b0e1cedbf19fdfb78ef3d672cb9928e0a91a9cb4629cc0c916e8cff8aaaa1" -dependencies = [ - "base64 0.21.7", - "bytes", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.32", - "pin-project", - "tokio-stream", - "tonic", - "tower-http 0.4.4", - "tower-layer", - "tower-service", - "tracing", -] - -[[package]] -name = "tower" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" -dependencies = [ - "futures-core", - "futures-util", - "indexmap 1.9.3", - "pin-project", - "pin-project-lite", - "rand 0.8.5", - "slab", - "tokio", - "tokio-util", - "tower-layer", - "tower-service", - "tracing", -] - [[package]] name = "tower" version = "0.5.2" @@ -6763,26 +6269,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "tower-http" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" -dependencies = [ - "bitflags 2.9.1", - "bytes", - "futures-core", - "futures-util", - "http 0.2.12", - "http-body 0.4.6", - "http-range-header", - "pin-project-lite", - "tower 0.4.13", - "tower-layer", - "tower-service", - "tracing", -] - [[package]] name = "tower-http" version = "0.6.6" @@ -6796,7 +6282,7 @@ dependencies = [ "http-body 1.0.1", "iri-string", "pin-project-lite", - "tower 0.5.2", + "tower", "tower-layer", "tower-service", ] @@ -6972,15 +6458,6 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" -[[package]] -name = "uncased" 
-version = "0.9.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1b88fcfe09e89d3866a5c11019378088af2d24c3fbd4f0543f96b479ec90697" -dependencies = [ - "version_check", -] - [[package]] name = "unicode-ident" version = "1.0.18" @@ -7056,18 +6533,6 @@ dependencies = [ "serde", ] -[[package]] -name = "uuid" -version = "1.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d" -dependencies = [ - "getrandom 0.3.3", - "js-sys", - "serde", - "wasm-bindgen", -] - [[package]] name = "valuable" version = "0.1.1" @@ -7135,7 +6600,7 @@ version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "once_cell", "rustversion", "wasm-bindgen-macro", @@ -7161,7 +6626,7 @@ version = "0.4.50" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "js-sys", "once_cell", "wasm-bindgen", @@ -7216,36 +6681,6 @@ version = "0.25.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" -[[package]] -name = "webpki-roots" -version = "0.26.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" -dependencies = [ - "webpki-roots 1.0.0", -] - -[[package]] -name = "webpki-roots" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2853738d1cc4f2da3a225c18ec6c3721abb31961096e9dbf5ab35fa88b19cfdb" -dependencies = [ - "rustls-pki-types", -] - -[[package]] -name = "which" -version = "4.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" -dependencies = [ - "either", - "home", - "once_cell", - "rustix 0.38.44", -] - [[package]] name = "winapi" version = "0.3.9" @@ -7510,7 +6945,7 @@ version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "windows-sys 0.48.0", ] @@ -7587,34 +7022,13 @@ dependencies = [ "synstructure", ] -[[package]] -name = "zerocopy" -version = "0.7.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" -dependencies = [ - "byteorder", - "zerocopy-derive 0.7.35", -] - [[package]] name = "zerocopy" version = "0.8.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb" dependencies = [ - "zerocopy-derive 0.8.25", -] - -[[package]] -name = "zerocopy-derive" -version = "0.7.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.101", + "zerocopy-derive", ] [[package]] @@ -7713,7 +7127,7 @@ dependencies = [ "bzip2", "constant_time_eq 0.1.5", "crc32fast", - "crossbeam-utils", + "crossbeam-utils 0.8.21", "flate2", "hmac", "pbkdf2 0.11.0", From 937373cec89f7a09ca5c990345b9d93f9ca57a55 Mon Sep 17 00:00:00 2001 
From: Esteban Dimitroff Hodi Date: Thu, 26 Jun 2025 12:23:31 -0300 Subject: [PATCH 21/40] updated service.nix with proper sha --- crates/l2/tee/quote-gen/service.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/l2/tee/quote-gen/service.nix b/crates/l2/tee/quote-gen/service.nix index a0e7f97612..1df2c1d64a 100644 --- a/crates/l2/tee/quote-gen/service.nix +++ b/crates/l2/tee/quote-gen/service.nix @@ -20,7 +20,7 @@ let lockFile = ./Cargo.lock; outputHashes = { "bls12_381-0.8.0" = "sha256-8/pXRA7hVAPeMKCZ+PRPfQfxqstw5Ob4MJNp85pv5WQ="; - "spawned-concurrency-0.1.0" = ""; + "spawned-concurrency-0.1.0" = "sha256-ZnQ6eBFG/r1chTbaoh117J7QmtogwVdu6q/j7JLvO/o="; "aligned-sdk-0.1.0" = "sha256-Az97VtggdN4gsYds3myezNJ+mNeSaIDbF0Pq5kq2M3M="; "lambdaworks-crypto-0.12.0" = "sha256-4vgW/O85zVLhhFrcZUwcPjavy/rRWB8LGTabAkPNrDw="; }; From e98aa41d1b1b09f7faada13f7b90bf42673696af Mon Sep 17 00:00:00 2001 From: Esteban Dimitroff Hodi Date: Fri, 27 Jun 2025 09:51:54 -0300 Subject: [PATCH 22/40] Corrected potential problem on tcp stream handling --- crates/networking/p2p/rlpx/connection/server.rs | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/crates/networking/p2p/rlpx/connection/server.rs b/crates/networking/p2p/rlpx/connection/server.rs index a0b339af65..2e6623bb01 100644 --- a/crates/networking/p2p/rlpx/connection/server.rs +++ b/crates/networking/p2p/rlpx/connection/server.rs @@ -307,7 +307,7 @@ where state.inbound, ); } - init_peer_conn(state, &mut stream).await?; + init_capabilities(state, &mut stream).await?; log_peer_debug(&state.node, "Peer connection initialized."); // Send transactions transaction hashes from mempool at connection start @@ -398,7 +398,7 @@ async fn should_send_block_range_update(state: &mut Established) -> Result(state: &mut Established, stream: &mut S) -> Result<(), RLPxError> +async fn init_capabilities(state: &mut Established, stream: &mut S) -> Result<(), RLPxError> where S: Unpin + Stream>, { @@ -615,14 +615,18 @@ where { let node = node.clone(); spawned_rt::tasks::spawn(async move { - while let Some(message) = stream.next().await { - match message { - Ok(message) => { + loop { + match stream.next().await { + Some(Ok(message)) => { let _ = conn.cast(CastMessage::PeerMessage(message)).await; } - Err(e) => { + Some(Err(e)) => { log_peer_debug(&node, &format!("Received RLPX Error in msg {}", e)); + break; } + // `None` does not neccessary means EOF, so we will keep the loop running + // (See Framed::new) + None => (), } } }); From fb4a9593e0d419fb6654a2fc6290943f4fbc1f7e Mon Sep 17 00:00:00 2001 From: Esteban Dimitroff Hodi Date: Fri, 27 Jun 2025 18:06:32 -0300 Subject: [PATCH 23/40] Added TODO comments and links to issues --- crates/l2/based/block_fetcher.rs | 2 + crates/l2/based/state_updater.rs | 2 + crates/l2/sequencer/errors.rs | 12 + crates/l2/sequencer/l1_watcher_old.rs | 377 ++++++++++++++++++ .../networking/p2p/rlpx/connection/server.rs | 1 + 5 files changed, 394 insertions(+) create mode 100644 crates/l2/sequencer/l1_watcher_old.rs diff --git a/crates/l2/based/block_fetcher.rs b/crates/l2/based/block_fetcher.rs index 74f9d5f121..be4e10b1a8 100644 --- a/crates/l2/based/block_fetcher.rs +++ b/crates/l2/based/block_fetcher.rs @@ -57,6 +57,8 @@ pub enum BlockFetcherError { BlobBundleError, #[error("Failed to compute deposit logs hash: {0}")] DepositError(#[from] ethrex_l2_common::deposits::DepositError), + // TODO: Avoid propagating GenServerErrors outside GenServer modules + // See 
https://github.com/lambdaclass/ethrex/issues/3376 #[error("Spawned GenServer Error")] GenServerError(GenServerError), } diff --git a/crates/l2/based/state_updater.rs b/crates/l2/based/state_updater.rs index 8fc42bab40..5505fe21cf 100644 --- a/crates/l2/based/state_updater.rs +++ b/crates/l2/based/state_updater.rs @@ -34,6 +34,8 @@ pub enum StateUpdaterError { InvalidForkChoice(#[from] ethrex_blockchain::error::InvalidForkChoice), #[error("Internal Error: {0}")] InternalError(String), + // TODO: Avoid propagating GenServerErrors outside GenServer modules + // See https://github.com/lambdaclass/ethrex/issues/3376 #[error("Spawned GenServer Error")] GenServerError(GenServerError), } diff --git a/crates/l2/sequencer/errors.rs b/crates/l2/sequencer/errors.rs index e00a9f2e5e..8bf903efd5 100644 --- a/crates/l2/sequencer/errors.rs +++ b/crates/l2/sequencer/errors.rs @@ -64,6 +64,8 @@ pub enum L1WatcherError { FailedAccessingRollUpStore(#[from] RollupStoreError), #[error("{0}")] Custom(String), + // TODO: Avoid propagating GenServerErrors outside GenServer modules + // See https://github.com/lambdaclass/ethrex/issues/3376 #[error("Spawned GenServer Error")] GenServerError(GenServerError), } @@ -130,6 +132,8 @@ pub enum ProofSenderError { InternalError(String), #[error("Failed to parse OnChainProposer response: {0}")] FailedToParseOnChainProposerResponse(String), + // TODO: Avoid propagating GenServerErrors outside GenServer modules + // See https://github.com/lambdaclass/ethrex/issues/3376 #[error("Spawned GenServer Error")] GenServerError(GenServerError), #[error("Proof Sender failed because of a rollup store error: {0}")] @@ -194,6 +198,8 @@ pub enum BlockProducerError { FailedToEncodeAccountStateDiff(#[from] StateDiffError), #[error("Failed to get data from: {0}")] FailedToGetDataFrom(String), + // TODO: Avoid propagating GenServerErrors outside GenServer modules + // See https://github.com/lambdaclass/ethrex/issues/3376 #[error("Spawned GenServer Error")] GenServerError(GenServerError), } @@ -244,6 +250,8 @@ pub enum CommitterError { DepositError(#[from] DepositError), #[error("L1Message error: {0}")] L1MessageError(#[from] L1MessagingError), + // TODO: Avoid propagating GenServerErrors outside GenServer modules + // See https://github.com/lambdaclass/ethrex/issues/3376 #[error("Spawned GenServer Error")] GenServerError(GenServerError), } @@ -270,6 +278,8 @@ pub enum MetricsGathererError { EthClientError(#[from] EthClientError), #[error("MetricsGatherer: {0}")] TryInto(String), + // TODO: Avoid propagating GenServerErrors outside GenServer modules + // See https://github.com/lambdaclass/ethrex/issues/3376 #[error("Spawned GenServer Error")] GenServerError(GenServerError), } @@ -284,6 +294,8 @@ pub enum ExecutionCacheError { #[derive(Debug, thiserror::Error)] pub enum ConnectionHandlerError { + // TODO: Avoid propagating GenServerErrors outside GenServer modules + // See https://github.com/lambdaclass/ethrex/issues/3376 #[error("Spawned GenServer Error")] GenServerError(GenServerError), } diff --git a/crates/l2/sequencer/l1_watcher_old.rs b/crates/l2/sequencer/l1_watcher_old.rs new file mode 100644 index 0000000000..e7b2617d0f --- /dev/null +++ b/crates/l2/sequencer/l1_watcher_old.rs @@ -0,0 +1,377 @@ +use crate::{sequencer::errors::L1WatcherError, utils::parse::hash_to_address}; +use crate::{EthConfig, L1WatcherConfig, SequencerConfig}; +use bytes::Bytes; +use ethereum_types::{Address, H256, U256}; +use ethrex_blockchain::Blockchain; +use ethrex_common::{types::Transaction, H160}; +use 
ethrex_rpc::types::receipt::RpcLog; +use ethrex_rpc::{ + clients::eth::{eth_sender::Overrides, EthClient}, + types::receipt::RpcLogInfo, +}; +use ethrex_storage::Store; +use keccak_hash::keccak; +use std::{cmp::min, sync::Arc}; +use tracing::{debug, error, info, warn}; + +use super::errors::SequencerError; +use super::utils::sleep_random; + +pub async fn start_l1_watcher( + store: Store, + blockchain: Arc, + cfg: SequencerConfig, +) -> Result<(), SequencerError> { + let mut l1_watcher = L1Watcher::new_from_config(&cfg.l1_watcher, &cfg.eth).await?; + l1_watcher.run(&store, &blockchain).await; + Ok(()) +} + +pub struct L1Watcher { + eth_client: EthClient, + l2_client: EthClient, + address: Address, + max_block_step: U256, + last_block_fetched: U256, + check_interval: u64, + l1_block_delay: u64, +} + +impl L1Watcher { + pub async fn new_from_config( + watcher_config: &L1WatcherConfig, + eth_config: &EthConfig, + ) -> Result { + let eth_client = EthClient::new(ð_config.rpc_url); + let l2_client = EthClient::new("http://localhost:1729"); + + let last_block_fetched = U256::zero(); + Ok(Self { + eth_client, + l2_client, + address: watcher_config.bridge_address, + max_block_step: watcher_config.max_block_step, + last_block_fetched, + check_interval: watcher_config.check_interval_ms, + l1_block_delay: watcher_config.watcher_block_delay, + }) + } + + pub async fn run(&mut self, store: &Store, blockchain: &Blockchain) { + loop { + if let Err(err) = self.main_logic(store, blockchain).await { + error!("L1 Watcher Error: {}", err); + } + } + } + + async fn main_logic( + &mut self, + store: &Store, + blockchain: &Blockchain, + ) -> Result<(), L1WatcherError> { + loop { + sleep_random(self.check_interval).await; + + let logs = self.get_logs().await?; + + // We may not have a deposit nor a withdrawal, that means no events -> no logs. + if logs.is_empty() { + continue; + } + + let _deposit_txs = self.process_logs(logs, store, blockchain).await?; + } + } + + pub async fn get_logs(&mut self) -> Result, L1WatcherError> { + if self.last_block_fetched.is_zero() { + self.last_block_fetched = self + .eth_client + .get_last_fetched_l1_block(self.address) + .await? + .into(); + } + + let Some(latest_block_to_check) = self + .eth_client + .get_block_number() + .await? 
+ .checked_sub(self.l1_block_delay.into()) + else { + warn!("Too close to genesis to request deposits"); + return Ok(vec![]); + }; + + debug!( + "Latest possible block number with {} blocks of delay: {latest_block_to_check} ({latest_block_to_check:#x})", + self.l1_block_delay, + ); + + // last_block_fetched could be greater than latest_block_to_check: + // - Right after deploying the contract as latest_block_fetched is set to the block where the contract is deployed + // - If the node is stopped and l1_block_delay is changed + if self.last_block_fetched > latest_block_to_check { + warn!("Last block fetched is greater than latest safe block"); + return Ok(vec![]); + } + + let new_last_block = min( + self.last_block_fetched + self.max_block_step, + latest_block_to_check, + ); + + debug!( + "Looking logs from block {:#x} to {:#x}", + self.last_block_fetched, new_last_block + ); + + // Matches the event DepositInitiated from ICommonBridge.sol + let topic = keccak( + b"DepositInitiated(uint256,address,uint256,address,address,uint256,bytes,bytes32)", + ); + let logs = match self + .eth_client + .get_logs( + self.last_block_fetched + 1, + new_last_block, + self.address, + topic, + ) + .await + { + Ok(logs) => logs, + Err(error) => { + // We may get an error if the RPC doesn't has the logs for the requested + // block interval. For example, Light Nodes. + warn!("Error when getting logs from L1: {}", error); + vec![] + } + }; + + debug!("Logs: {:#?}", logs); + + // If we have an error adding the tx to the mempool we may assign it to the next + // block to fetch, but we may lose a deposit tx. + self.last_block_fetched = new_last_block; + + Ok(logs) + } + + pub async fn process_logs( + &self, + logs: Vec, + store: &Store, + blockchain: &Blockchain, + ) -> Result, L1WatcherError> { + let mut deposit_txs = Vec::new(); + + for log in logs { + let deposit_data = DepositData::from_log(log.log)?; + + if self + .deposit_already_processed(deposit_data.deposit_tx_hash, store) + .await? + { + warn!( + "Deposit already processed (to: {:x}, value: {:x}, depositId: {:#}), skipping.", + deposit_data.recipient, deposit_data.mint_value, deposit_data.deposit_id + ); + continue; + } + + info!( + "Initiating mint transaction for {:x} with value {:x} and depositId: {:#}", + deposit_data.recipient, deposit_data.mint_value, deposit_data.deposit_id + ); + + let gas_price = self.l2_client.get_gas_price().await?; + // Avoid panicking when using as_u64() + let gas_price: u64 = gas_price + .try_into() + .map_err(|_| L1WatcherError::Custom("Failed at gas_price.try_into()".to_owned()))?; + + let mint_transaction = self + .eth_client + .build_privileged_transaction( + deposit_data.to_address, + deposit_data.recipient, + deposit_data.from, + Bytes::copy_from_slice(&deposit_data.calldata), + Overrides { + chain_id: Some( + store + .get_chain_config() + .map_err(|e| { + L1WatcherError::FailedToRetrieveChainConfig(e.to_string()) + })? + .chain_id, + ), + // Using the deposit_id as nonce. + // If we make a transaction on the L2 with this address, we may break the + // deposit workflow. + nonce: Some(deposit_data.deposit_id.as_u64()), + value: Some(deposit_data.mint_value), + gas_limit: Some(deposit_data.gas_limit.as_u64()), + // TODO(CHECK): Seems that when we start the L2, we need to set the gas. + // Otherwise, the transaction is not included in the mempool. + // We should override the blockchain to always include the transaction. 
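+ // Both fee fields below reuse the gas price fetched from the L2 client earlier in this function, as a stop-gap until the TODO above is resolved.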
+ max_fee_per_gas: Some(gas_price), + max_priority_fee_per_gas: Some(gas_price), + ..Default::default() + }, + ) + .await?; + + match blockchain + .add_transaction_to_pool(Transaction::PrivilegedL2Transaction(mint_transaction)) + .await + { + Ok(hash) => { + info!("Mint transaction added to mempool {hash:#x}",); + deposit_txs.push(hash); + } + Err(e) => { + warn!("Failed to add mint transaction to the mempool: {e:#?}"); + // TODO: Figure out if we want to continue or not + continue; + } + } + } + + Ok(deposit_txs) + } + + async fn deposit_already_processed( + &self, + deposit_hash: H256, + store: &Store, + ) -> Result { + if store + .get_transaction_by_hash(deposit_hash) + .await + .map_err(L1WatcherError::FailedAccessingStore)? + .is_some() + { + return Ok(true); + } + + // If we have a reconstructed state, we don't have the transaction in our store. + // Check if the deposit is marked as pending in the contract. + let pending_deposits = self + .eth_client + .get_pending_deposit_logs(self.address) + .await?; + Ok(!pending_deposits.contains(&deposit_hash)) + } +} + +struct DepositData { + pub mint_value: U256, + pub to_address: H160, + pub deposit_id: U256, + pub recipient: H160, + pub from: H160, + pub gas_limit: U256, + pub calldata: Vec, + pub deposit_tx_hash: H256, +} + +impl DepositData { + fn from_log(log: RpcLogInfo) -> Result { + let mint_value = format!( + "{:#x}", + log.topics + .get(1) + .ok_or(L1WatcherError::FailedToDeserializeLog( + "Failed to parse mint value from log: log.topics[1] out of bounds".to_owned() + ))? + ) + .parse::() + .map_err(|e| { + L1WatcherError::FailedToDeserializeLog(format!( + "Failed to parse mint value from log: {e:#?}" + )) + })?; + let to_address_hash = log + .topics + .get(2) + .ok_or(L1WatcherError::FailedToDeserializeLog( + "Failed to parse beneficiary from log: log.topics[2] out of bounds".to_owned(), + ))?; + let to_address = hash_to_address(*to_address_hash); + + let deposit_id = log + .topics + .get(3) + .ok_or(L1WatcherError::FailedToDeserializeLog( + "Failed to parse beneficiary from log: log.topics[3] out of bounds".to_owned(), + ))?; + + let deposit_id = format!("{deposit_id:#x}").parse::().map_err(|e| { + L1WatcherError::FailedToDeserializeLog(format!( + "Failed to parse depositId value from log: {e:#?}" + )) + })?; + + // The previous values are indexed in the topic of the log. Data contains the rest. + // DATA = recipient: Address || from: Address || gas_limit: uint256 || offset_calldata: uint256 || tx_hash: H256 || length_calldata: uint256 || calldata: bytes + // DATA = 0..32 || 32..64 || 64..96 || 96..128 || 128..160 || 160..192 || 192..(192+calldata_len) + // Any value that is not 32 bytes is padded with zeros. 
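+ // Addresses are ABI-encoded left-padded to 32 bytes, so only the low-order 20 bytes of each slot are read below (12..32 for recipient, 44..64 for from).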
+ + let recipient = log + .data + .get(12..32) + .ok_or(L1WatcherError::FailedToDeserializeLog( + "Failed to parse recipient from log: log.data[0..32] out of bounds".to_owned(), + ))?; + let recipient = Address::from_slice(recipient); + + let from = log + .data + .get(44..64) + .ok_or(L1WatcherError::FailedToDeserializeLog( + "Failed to parse from from log: log.data[44..64] out of bounds".to_owned(), + ))?; + let from = Address::from_slice(from); + + let gas_limit = U256::from_big_endian(log.data.get(64..96).ok_or( + L1WatcherError::FailedToDeserializeLog( + "Failed to parse gas_limit from log: log.data[64..96] out of bounds".to_owned(), + ), + )?); + + let deposit_tx_hash = H256::from_slice( + log.data + .get(128..160) + .ok_or(L1WatcherError::FailedToDeserializeLog( + "Failed to parse deposit_tx_hash from log: log.data[64..96] out of bounds" + .to_owned(), + ))?, + ); + + let calldata_len = U256::from_big_endian(log.data.get(160..192).ok_or( + L1WatcherError::FailedToDeserializeLog( + "Failed to parse calldata_len from log: log.data[96..128] out of bounds".to_owned(), + ), + )?); + let calldata = log + .data + .get(192..192 + calldata_len.as_usize()) + .ok_or(L1WatcherError::FailedToDeserializeLog( + "Failed to parse calldata from log: log.data[128..128 + calldata_len] out of bounds" + .to_owned(), + ))?; + + Ok(Self { + mint_value, + to_address, + deposit_id, + recipient, + from, + gas_limit, + calldata: calldata.to_vec(), + deposit_tx_hash, + }) + } +} diff --git a/crates/networking/p2p/rlpx/connection/server.rs b/crates/networking/p2p/rlpx/connection/server.rs index 2e6623bb01..860aa3483e 100644 --- a/crates/networking/p2p/rlpx/connection/server.rs +++ b/crates/networking/p2p/rlpx/connection/server.rs @@ -231,6 +231,7 @@ impl GenServer for RLPxConnection { if let InnerState::Established(mut established_state) = state.0.clone() { match message { // TODO: handle all these "let _" + // See https://github.com/lambdaclass/ethrex/issues/3375 Self::CastMsg::PeerMessage(message) => { log_peer_debug( &established_state.node, From 3291f4f92fc9ac09ef0602cd13921cf9f43d16eb Mon Sep 17 00:00:00 2001 From: Esteban Dimitroff Hodi Date: Mon, 30 Jun 2025 10:22:56 -0300 Subject: [PATCH 24/40] Added TODO comments and links to issues --- crates/networking/p2p/rlpx/connection/server.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/crates/networking/p2p/rlpx/connection/server.rs b/crates/networking/p2p/rlpx/connection/server.rs index 860aa3483e..09742b0cff 100644 --- a/crates/networking/p2p/rlpx/connection/server.rs +++ b/crates/networking/p2p/rlpx/connection/server.rs @@ -102,6 +102,8 @@ pub struct Established { //// messages from other connections (sent from other peers). //// The receive end is instantiated after the handshake is completed //// under `handle_peer`. 
+ /// TODO: Improve this mechanism + /// See https://github.com/lambdaclass/ethrex/issues/3388 pub(crate) connection_broadcast_send: RLPxConnBroadcastSender, pub(crate) table: Arc>, pub(crate) backend_channel: Option>, @@ -610,6 +612,9 @@ where stream.next().await } +// TODO replace this spawn, once it's implemented in spawned +// See https://github.com/lambdaclass/ethrex/issues/3387 and +// https://github.com/lambdaclass/spawned/issues/17 fn spawn_listener(mut conn: RLPxConnectionHandle, node: &Node, mut stream: S) where S: Unpin + Send + Stream> + 'static, @@ -633,6 +638,11 @@ where }); } +// TODO Maybe provide a similar mechanism for this listener, or remove it when +// Broadcast is handled in a spawned GenServer +// See https://github.com/lambdaclass/ethrex/issues/3387 and +// https://github.com/lambdaclass/spawned/issues/17 and +// https://github.com/lambdaclass/ethrex/issues/3388 fn spawn_broadcast_listener(mut handle: RLPxConnectionHandle, state: &mut Established) { // Subscribe this connection to the broadcasting channel. // TODO currently spawning a listener task that will suscribe to a broadcast channel and From 010ce63bfce0c8f99cffb33b24e03ce12eacd91a Mon Sep 17 00:00:00 2001 From: Esteban Dimitroff Hodi Date: Mon, 30 Jun 2025 10:58:29 -0300 Subject: [PATCH 25/40] Fixed linter issues --- crates/networking/p2p/rlpx/connection/server.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/networking/p2p/rlpx/connection/server.rs b/crates/networking/p2p/rlpx/connection/server.rs index f5b0d0cc31..2731525092 100644 --- a/crates/networking/p2p/rlpx/connection/server.rs +++ b/crates/networking/p2p/rlpx/connection/server.rs @@ -810,7 +810,7 @@ async fn handle_backend_message( state: &mut Established, message: Message, ) -> Result<(), RLPxError> { - log_peer_debug(&state.node, &format!("Sending message {}", message)); + log_peer_debug(&state.node, &format!("Sending message {message}")); send(state, message).await?; Ok(()) } From d05f77e0e70289d9cc694b12c7974a7014b8cb2d Mon Sep 17 00:00:00 2001 From: Esteban Dimitroff Hodi Date: Tue, 1 Jul 2025 17:04:18 -0300 Subject: [PATCH 26/40] Updated to newest spawned version --- Cargo.lock | 4 +-- Cargo.toml | 4 +-- crates/l2/based/block_fetcher.rs | 17 ++++--------- crates/l2/based/state_updater.rs | 17 ++++--------- crates/l2/sequencer/block_producer.rs | 16 +++--------- crates/l2/sequencer/errors.rs | 2 +- crates/l2/sequencer/l1_committer.rs | 16 +++--------- crates/l2/sequencer/l1_proof_sender.rs | 16 +++--------- crates/l2/sequencer/l1_watcher.rs | 16 +++--------- crates/l2/sequencer/proof_coordinator.rs | 25 +++---------------- .../networking/p2p/rlpx/connection/server.rs | 16 +++--------- 11 files changed, 38 insertions(+), 111 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 717a6c66c1..8e4593730c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10729,7 +10729,7 @@ dependencies = [ [[package]] name = "spawned-concurrency" version = "0.1.0" -source = "git+https://github.com/lambdaclass/spawned.git?tag=v0.1.1-alpha#481439d3d147fa741d67bb5ff047f8c68bcefb83" +source = "git+https://github.com/lambdaclass/spawned.git?tag=v0.1.2-alpha#c6f757c0cc07a34f9e56c8c7ea8fde483b50ea20" dependencies = [ "futures", "spawned-rt", @@ -10739,7 +10739,7 @@ dependencies = [ [[package]] name = "spawned-rt" version = "0.1.0" -source = "git+https://github.com/lambdaclass/spawned.git?tag=v0.1.1-alpha#481439d3d147fa741d67bb5ff047f8c68bcefb83" +source = 
"git+https://github.com/lambdaclass/spawned.git?tag=v0.1.2-alpha#c6f757c0cc07a34f9e56c8c7ea8fde483b50ea20" dependencies = [ "crossbeam 0.7.3", "tokio", diff --git a/Cargo.toml b/Cargo.toml index 89e9fb753d..3838419b73 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -96,8 +96,8 @@ libsql = "0.9.10" futures = "0.3.31" # Changing the tag for spawned will break the TDX image build # When updating it try to build the TDX image and update service.nix with the new hash -spawned-concurrency = {git = "https://github.com/lambdaclass/spawned.git", tag = "v0.1.1-alpha"} -spawned-rt = {git = "https://github.com/lambdaclass/spawned.git", tag = "v0.1.1-alpha"} +spawned-concurrency = {git = "https://github.com/lambdaclass/spawned.git", tag = "v0.1.2-alpha"} +spawned-rt = {git = "https://github.com/lambdaclass/spawned.git", tag = "v0.1.2-alpha"} [patch.crates-io] secp256k1 = { git = "https://github.com/sp1-patches/rust-secp256k1", tag = "patch-0.29.1-sp1-5.0.0" } diff --git a/crates/l2/based/block_fetcher.rs b/crates/l2/based/block_fetcher.rs index be4e10b1a8..99681d874a 100644 --- a/crates/l2/based/block_fetcher.rs +++ b/crates/l2/based/block_fetcher.rs @@ -18,8 +18,10 @@ use ethrex_storage::Store; use ethrex_storage_rollup::{RollupStoreError, StoreRollup}; use ethrex_vm::{Evm, EvmEngine}; use keccak_hash::keccak; -use spawned_concurrency::tasks::{ - CallResponse, CastResponse, GenServer, GenServerError, GenServerHandle, send_after, +use spawned_concurrency::{ + error::GenServerError, + messages::Unused, + tasks::{CastResponse, GenServer, GenServerHandle, send_after}, }; use tracing::{debug, error, info}; @@ -134,7 +136,7 @@ impl BlockFetcher { } impl GenServer for BlockFetcher { - type CallMsg = (); + type CallMsg = Unused; type CastMsg = InMessage; type OutMsg = OutMessage; type State = BlockFetcherState; @@ -144,15 +146,6 @@ impl GenServer for BlockFetcher { Self {} } - async fn handle_call( - &mut self, - _message: Self::CallMsg, - _handle: &GenServerHandle, - state: Self::State, - ) -> CallResponse { - CallResponse::Reply(state, Self::OutMsg::Done) - } - async fn handle_cast( &mut self, _message: Self::CastMsg, diff --git a/crates/l2/based/state_updater.rs b/crates/l2/based/state_updater.rs index 5505fe21cf..0522c444b8 100644 --- a/crates/l2/based/state_updater.rs +++ b/crates/l2/based/state_updater.rs @@ -6,8 +6,10 @@ use ethrex_l2_sdk::calldata::encode_calldata; use ethrex_rpc::{EthClient, clients::Overrides}; use ethrex_storage::Store; use ethrex_storage_rollup::{RollupStoreError, StoreRollup}; -use spawned_concurrency::tasks::{ - CallResponse, CastResponse, GenServer, GenServerError, GenServerHandle, send_after, +use spawned_concurrency::{ + error::GenServerError, + messages::Unused, + tasks::{CastResponse, GenServer, GenServerHandle, send_after}, }; use tracing::{debug, error, info, warn}; @@ -103,7 +105,7 @@ impl StateUpdater { } impl GenServer for StateUpdater { - type CallMsg = (); + type CallMsg = Unused; type CastMsg = InMessage; type OutMsg = OutMessage; type State = StateUpdaterState; @@ -113,15 +115,6 @@ impl GenServer for StateUpdater { Self {} } - async fn handle_call( - &mut self, - _message: Self::CallMsg, - _handle: &GenServerHandle, - state: Self::State, - ) -> CallResponse { - CallResponse::Reply(state, Self::OutMsg::Done) - } - async fn handle_cast( &mut self, _message: Self::CastMsg, diff --git a/crates/l2/sequencer/block_producer.rs b/crates/l2/sequencer/block_producer.rs index 7273157c7a..e3014d7515 100644 --- a/crates/l2/sequencer/block_producer.rs +++ 
b/crates/l2/sequencer/block_producer.rs @@ -16,8 +16,9 @@ use ethrex_storage_rollup::StoreRollup; use ethrex_vm::BlockExecutionResult; use keccak_hash::H256; use payload_builder::build_payload; -use spawned_concurrency::tasks::{ - CallResponse, CastResponse, GenServer, GenServerHandle, send_after, +use spawned_concurrency::{ + messages::Unused, + tasks::{CastResponse, GenServer, GenServerHandle, send_after}, }; use tracing::{debug, error, info}; @@ -105,7 +106,7 @@ impl BlockProducer { } impl GenServer for BlockProducer { - type CallMsg = (); + type CallMsg = Unused; type CastMsg = InMessage; type OutMsg = OutMessage; type State = BlockProducerState; @@ -116,15 +117,6 @@ impl GenServer for BlockProducer { Self {} } - async fn handle_call( - &mut self, - _message: Self::CallMsg, - _handle: &GenServerHandle, - state: Self::State, - ) -> CallResponse { - CallResponse::Reply(state, Self::OutMsg::Done) - } - async fn handle_cast( &mut self, _message: Self::CastMsg, diff --git a/crates/l2/sequencer/errors.rs b/crates/l2/sequencer/errors.rs index e583c92102..0a44475275 100644 --- a/crates/l2/sequencer/errors.rs +++ b/crates/l2/sequencer/errors.rs @@ -14,7 +14,7 @@ use ethrex_rpc::clients::eth::errors::{CalldataEncodeError, EthClientError}; use ethrex_storage::error::StoreError; use ethrex_storage_rollup::RollupStoreError; use ethrex_vm::{EvmError, ProverDBError}; -use spawned_concurrency::tasks::GenServerError; +use spawned_concurrency::error::GenServerError; use tokio::task::JoinError; #[derive(Debug, thiserror::Error)] diff --git a/crates/l2/sequencer/l1_committer.rs b/crates/l2/sequencer/l1_committer.rs index 3e2c56eb77..18bee62cbc 100644 --- a/crates/l2/sequencer/l1_committer.rs +++ b/crates/l2/sequencer/l1_committer.rs @@ -35,8 +35,9 @@ use std::collections::HashMap; use tracing::{debug, error, info, warn}; use super::{errors::BlobEstimationError, utils::random_duration}; -use spawned_concurrency::tasks::{ - CallResponse, CastResponse, GenServer, GenServerHandle, send_after, +use spawned_concurrency::{ + messages::Unused, + tasks::{CastResponse, GenServer, GenServerHandle, send_after}, }; const COMMIT_FUNCTION_SIGNATURE_BASED: &str = @@ -130,7 +131,7 @@ impl L1Committer { } impl GenServer for L1Committer { - type CallMsg = (); + type CallMsg = Unused; type CastMsg = InMessage; type OutMsg = OutMessage; type State = CommitterState; @@ -141,15 +142,6 @@ impl GenServer for L1Committer { Self {} } - async fn handle_call( - &mut self, - _message: Self::CallMsg, - _handle: &GenServerHandle, - state: Self::State, - ) -> CallResponse { - CallResponse::Reply(state, Self::OutMsg::Done) - } - async fn handle_cast( &mut self, _message: Self::CastMsg, diff --git a/crates/l2/sequencer/l1_proof_sender.rs b/crates/l2/sequencer/l1_proof_sender.rs index 4211982866..53663241cf 100644 --- a/crates/l2/sequencer/l1_proof_sender.rs +++ b/crates/l2/sequencer/l1_proof_sender.rs @@ -9,8 +9,9 @@ use ethrex_l2_sdk::calldata::encode_calldata; use ethrex_rpc::EthClient; use ethrex_storage_rollup::StoreRollup; use secp256k1::SecretKey; -use spawned_concurrency::tasks::{ - CallResponse, CastResponse, GenServer, GenServerHandle, send_after, +use spawned_concurrency::{ + messages::Unused, + tasks::{CastResponse, GenServer, GenServerHandle, send_after}, }; use tracing::{debug, error, info}; @@ -140,7 +141,7 @@ impl L1ProofSender { } impl GenServer for L1ProofSender { - type CallMsg = (); + type CallMsg = Unused; type CastMsg = InMessage; type OutMsg = OutMessage; type State = L1ProofSenderState; @@ -151,15 +152,6 @@ impl 
GenServer for L1ProofSender { Self {} } - async fn handle_call( - &mut self, - _message: Self::CallMsg, - _handle: &GenServerHandle, - state: Self::State, - ) -> CallResponse { - CallResponse::Reply(state, OutMessage::Done) - } - async fn handle_cast( &mut self, _message: Self::CastMsg, diff --git a/crates/l2/sequencer/l1_watcher.rs b/crates/l2/sequencer/l1_watcher.rs index 1b0f0e246b..077297751f 100644 --- a/crates/l2/sequencer/l1_watcher.rs +++ b/crates/l2/sequencer/l1_watcher.rs @@ -13,9 +13,8 @@ use ethrex_rpc::{ }; use ethrex_storage::Store; use keccak_hash::keccak; -use spawned_concurrency::tasks::{ - CallResponse, CastResponse, GenServer, GenServerHandle, send_after, -}; +use spawned_concurrency::messages::Unused; +use spawned_concurrency::tasks::{CastResponse, GenServer, GenServerHandle, send_after}; use std::{cmp::min, sync::Arc}; use tracing::{debug, error, info, warn}; @@ -93,8 +92,8 @@ impl L1Watcher { } impl GenServer for L1Watcher { + type CallMsg = Unused; type CastMsg = InMessage; - type CallMsg = (); type OutMsg = OutMessage; type State = L1WatcherState; type Error = L1WatcherError; @@ -117,15 +116,6 @@ impl GenServer for L1Watcher { Ok(state) } - async fn handle_call( - &mut self, - _message: Self::CallMsg, - _handle: &GenServerHandle, - state: Self::State, - ) -> CallResponse { - CallResponse::Reply(state, Self::OutMsg::Done) - } - async fn handle_cast( &mut self, message: Self::CastMsg, diff --git a/crates/l2/sequencer/proof_coordinator.rs b/crates/l2/sequencer/proof_coordinator.rs index 6e04a8ce78..ae76840f24 100644 --- a/crates/l2/sequencer/proof_coordinator.rs +++ b/crates/l2/sequencer/proof_coordinator.rs @@ -19,7 +19,8 @@ use ethrex_storage_rollup::StoreRollup; use secp256k1::SecretKey; use serde::{Deserialize, Serialize}; use serde_with::serde_as; -use spawned_concurrency::tasks::{CallResponse, CastResponse, GenServer, GenServerHandle}; +use spawned_concurrency::messages::Unused; +use spawned_concurrency::tasks::{CastResponse, GenServer, GenServerHandle}; use std::net::{IpAddr, SocketAddr}; use std::sync::Arc; use tokio::{ @@ -238,7 +239,7 @@ impl ProofCoordinator { } impl GenServer for ProofCoordinator { - type CallMsg = (); + type CallMsg = Unused; type CastMsg = ProofCordInMessage; type OutMsg = ProofCordOutMessage; type State = ProofCoordinatorState; @@ -248,15 +249,6 @@ impl GenServer for ProofCoordinator { Self {} } - async fn handle_call( - &mut self, - _message: Self::CallMsg, - _handle: &GenServerHandle, - state: Self::State, - ) -> CallResponse { - CallResponse::Reply(state, ProofCordOutMessage::Done) - } - async fn handle_cast( &mut self, message: Self::CastMsg, @@ -332,7 +324,7 @@ pub enum ConnOutMessage { } impl GenServer for ConnectionHandler { - type CallMsg = ConnInMessage; + type CallMsg = Unused; type CastMsg = ConnInMessage; type OutMsg = ConnOutMessage; type State = ProofCoordinatorState; @@ -342,15 +334,6 @@ impl GenServer for ConnectionHandler { Self {} } - async fn handle_call( - &mut self, - _message: Self::CallMsg, - _handle: &GenServerHandle, - state: Self::State, - ) -> CallResponse { - CallResponse::Reply(state, ConnOutMessage::Done) - } - async fn handle_cast( &mut self, message: Self::CastMsg, diff --git a/crates/networking/p2p/rlpx/connection/server.rs b/crates/networking/p2p/rlpx/connection/server.rs index 2731525092..6974959665 100644 --- a/crates/networking/p2p/rlpx/connection/server.rs +++ b/crates/networking/p2p/rlpx/connection/server.rs @@ -14,8 +14,9 @@ use ethrex_storage::Store; use futures::{SinkExt as _, Stream, 
stream::SplitSink}; use k256::{PublicKey, ecdsa::SigningKey}; use rand::random; -use spawned_concurrency::tasks::{ - CallResponse, CastResponse, GenServer, GenServerHandle, send_interval, +use spawned_concurrency::{ + messages::Unused, + tasks::{CastResponse, GenServer, GenServerHandle, send_interval}, }; use tokio::{ net::TcpStream, @@ -182,7 +183,7 @@ impl RLPxConnection { } impl GenServer for RLPxConnection { - type CallMsg = CallMessage; + type CallMsg = Unused; type CastMsg = CastMessage; type OutMsg = MsgResult; type State = RLPxConnectionState; @@ -215,15 +216,6 @@ impl GenServer for RLPxConnection { } } - async fn handle_call( - &mut self, - _message: Self::CallMsg, - _handle: &RLPxConnectionHandle, - state: Self::State, - ) -> CallResponse { - CallResponse::Reply(state, Ok(OutMessage::Done)) - } - async fn handle_cast( &mut self, message: Self::CastMsg, From 8b9d4b87389ca7d963016fcb8aa3fa4e3ffc50c9 Mon Sep 17 00:00:00 2001 From: Lucas Fiegl Date: Mon, 30 Jun 2025 16:19:40 -0300 Subject: [PATCH 27/40] feat(l2): implement ERC20 bridge (#3241) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit **Motivation** We want to be able to bridge ERC20 tokens. **Description** The inner workings are explained on #3223 --------- Co-authored-by: Tomás Grüner <47506558+MegaRedHand@users.noreply.github.com> Co-authored-by: Manuel Iñaki Bilbao --- .github/workflows/pr-main_l2.yaml | 12 + crates/l2/contracts/bin/deployer/error.rs | 10 +- crates/l2/contracts/bin/deployer/main.rs | 195 +----------- .../bin/system_contracts_updater/main.rs | 8 +- crates/l2/contracts/src/example/L2ERC20.sol | 33 ++ crates/l2/contracts/src/l1/CommonBridge.sol | 119 ++++++- .../src/l1/interfaces/ICommonBridge.sol | 33 +- crates/l2/contracts/src/l2/CommonBridgeL2.sol | 49 ++- .../{L1Messenger.sol => L2ToL1Messenger.sol} | 6 +- .../src/l2/interfaces/ICommonBridgeL2.sol | 52 ++++ .../contracts/src/l2/interfaces/IERC20L2.sol | 21 ++ ...{IL1Messenger.sol => IL2ToL1Messenger.sol} | 2 +- crates/l2/sdk/src/sdk.rs | 292 +++++++++++++++++- crates/l2/tests/tests.rs | 234 ++++++++++++-- fixtures/genesis/l2.json | 6 +- 15 files changed, 820 insertions(+), 252 deletions(-) create mode 100644 crates/l2/contracts/src/example/L2ERC20.sol rename crates/l2/contracts/src/l2/{L1Messenger.sol => L2ToL1Messenger.sol} (68%) create mode 100644 crates/l2/contracts/src/l2/interfaces/IERC20L2.sol rename crates/l2/contracts/src/l2/interfaces/{IL1Messenger.sol => IL2ToL1Messenger.sol} (96%) diff --git a/.github/workflows/pr-main_l2.yaml b/.github/workflows/pr-main_l2.yaml index 2b405fbe20..565c835721 100644 --- a/.github/workflows/pr-main_l2.yaml +++ b/.github/workflows/pr-main_l2.yaml @@ -70,6 +70,11 @@ jobs: - name: Set up Rust cache uses: Swatinem/rust-cache@v2 + - name: Install solc + uses: pontem-network/get-solc@master + with: + version: v0.8.29 + # also creates empty verification keys (as workflow runs with exec backend) - name: Build prover run: | @@ -111,6 +116,7 @@ jobs: - name: Run test run: | + sudo chmod -R a+rw crates/l2 cd crates/l2 RUST_LOG=info,ethrex_prover_lib=debug make init-prover & docker logs --follow ethrex_l2 & @@ -132,6 +138,11 @@ jobs: - name: Set up Rust cache uses: Swatinem/rust-cache@v2 + - name: Install solc + uses: pontem-network/get-solc@master + with: + version: v0.8.29 + - name: Build L1 docker image uses: docker/build-push-action@v6 with: @@ -197,6 +208,7 @@ jobs: - name: Run test run: | + sudo chmod -R a+rw crates/l2 cd crates/l2 RUST_LOG=info,ethrex_prover_lib=debug make 
init-prover & PROPOSER_COINBASE_ADDRESS=0x0007a881CD95B1484fca47615B64803dad620C8d cargo test l2 --release -- --nocapture --test-threads=1 diff --git a/crates/l2/contracts/bin/deployer/error.rs b/crates/l2/contracts/bin/deployer/error.rs index 75fdcbca56..6a518650ff 100644 --- a/crates/l2/contracts/bin/deployer/error.rs +++ b/crates/l2/contracts/bin/deployer/error.rs @@ -1,18 +1,14 @@ -use ethrex_l2_sdk::{ContractCompilationError, DeployError}; +use ethrex_l2_sdk::{ContractCompilationError, DeployError, GitError}; use ethrex_rpc::clients::{EthClientError, eth::errors::CalldataEncodeError}; #[derive(Debug, thiserror::Error)] pub enum DeployerError { - #[error("Failed to lock SALT: {0}")] - FailedToLockSALT(String), #[error("The path is not a valid utf-8 string")] FailedToGetStringFromPath, #[error("Deployer setup error: {0} not set")] ConfigValueNotSet(String), - #[error("Deployer setup parse error: {0}")] - ParseError(String), #[error("Deployer dependency error: {0}")] - DependencyError(String), + DependencyError(#[from] GitError), #[error("Deployer EthClient error: {0}")] EthClientError(#[from] EthClientError), #[error("Deployer decoding error: {0}")] @@ -23,6 +19,8 @@ pub enum DeployerError { FailedToCompileContract(#[from] ContractCompilationError), #[error("Failed to deploy contract: {0}")] FailedToDeployContract(#[from] DeployError), + #[error("Deployment subtask failed: {0}")] + DeploymentSubtaskFailed(String), #[error("Internal error: {0}")] InternalError(String), #[error("IO error: {0}")] diff --git a/crates/l2/contracts/bin/deployer/main.rs b/crates/l2/contracts/bin/deployer/main.rs index 9ba3d3c2a5..f97812d3c2 100644 --- a/crates/l2/contracts/bin/deployer/main.rs +++ b/crates/l2/contracts/bin/deployer/main.rs @@ -2,7 +2,7 @@ use std::{ fs::{File, OpenOptions, read_to_string}, io::{BufWriter, Write}, path::PathBuf, - process::{Command, ExitStatus, Stdio}, + process::{Command, Stdio}, str::FromStr, }; @@ -87,190 +87,7 @@ async fn main() -> Result<(), DeployerError> { } fn download_contract_deps(opts: &DeployerOptions) -> Result<(), DeployerError> { - trace!("Downloading contract dependencies"); - std::fs::create_dir_all(opts.contracts_path.join("lib")).map_err(|err| { - DeployerError::DependencyError(format!("Failed to create contracts/lib: {err}")) - })?; - - git_clone( - "https://github.com/OpenZeppelin/openzeppelin-contracts-upgradeable.git", - opts.contracts_path - .join("lib/openzeppelin-contracts-upgradeable") - .to_str() - .ok_or(DeployerError::FailedToGetStringFromPath)?, - None, - true, - )?; - - git_clone( - "https://github.com/succinctlabs/sp1-contracts.git", - opts.contracts_path - .join("lib/sp1-contracts") - .to_str() - .ok_or(DeployerError::FailedToGetStringFromPath)?, - None, - false, - )?; - - trace!("Contract dependencies downloaded"); - Ok(()) -} - -pub fn git_clone( - repository_url: &str, - outdir: &str, - branch: Option<&str>, - submodules: bool, -) -> Result { - info!(repository_url = %repository_url, outdir = %outdir, branch = ?branch, "Cloning or updating git repository"); - - if PathBuf::from(outdir).join(".git").exists() { - info!(outdir = %outdir, "Found existing git repository, updating..."); - - let branch_name = if let Some(b) = branch { - b.to_string() - } else { - // Look for default branch name (could be main, master or other) - let output = Command::new("git") - .current_dir(outdir) - .arg("symbolic-ref") - .arg("refs/remotes/origin/HEAD") - .output() - .map_err(|e| { - DeployerError::DependencyError(format!( - "Failed to get default branch for 
{outdir}: {e}" - )) - })?; - - if !output.status.success() { - let stderr = String::from_utf8_lossy(&output.stderr); - return Err(DeployerError::DependencyError(format!( - "Failed to get default branch for {outdir}: {stderr}" - ))); - } - - String::from_utf8(output.stdout) - .map_err(|_| { - DeployerError::InternalError("Failed to parse git output".to_string()) - })? - .trim() - .split('/') - .next_back() - .ok_or(DeployerError::InternalError( - "Failed to parse default branch".to_string(), - ))? - .to_string() - }; - - trace!(branch = %branch_name, "Updating to branch"); - - // Fetch - let fetch_status = Command::new("git") - .current_dir(outdir) - .args(["fetch", "origin"]) - .spawn() - .map_err(|err| { - DeployerError::DependencyError(format!("Failed to spawn git fetch: {err}")) - })? - .wait() - .map_err(|err| { - DeployerError::DependencyError(format!("Failed to wait for git fetch: {err}")) - })?; - if !fetch_status.success() { - return Err(DeployerError::DependencyError(format!( - "git fetch failed for {outdir}" - ))); - } - - // Checkout to branch - let checkout_status = Command::new("git") - .current_dir(outdir) - .arg("checkout") - .arg(&branch_name) - .spawn() - .map_err(|err| { - DeployerError::DependencyError(format!("Failed to spawn git checkout: {err}")) - })? - .wait() - .map_err(|err| { - DeployerError::DependencyError(format!("Failed to wait for git checkout: {err}")) - })?; - if !checkout_status.success() { - return Err(DeployerError::DependencyError(format!( - "git checkout of branch {branch_name} failed for {outdir}, try deleting the repo folder" - ))); - } - - // Reset branch to origin - let reset_status = Command::new("git") - .current_dir(outdir) - .arg("reset") - .arg("--hard") - .arg(format!("origin/{branch_name}")) - .spawn() - .map_err(|err| { - DeployerError::DependencyError(format!("Failed to spawn git reset: {err}")) - })? - .wait() - .map_err(|err| { - DeployerError::DependencyError(format!("Failed to wait for git reset: {err}")) - })?; - - if !reset_status.success() { - return Err(DeployerError::DependencyError(format!( - "git reset failed for {outdir}" - ))); - } - - // Update submodules - if submodules { - let submodule_status = Command::new("git") - .current_dir(outdir) - .arg("submodule") - .arg("update") - .arg("--init") - .arg("--recursive") - .spawn() - .map_err(|err| { - DeployerError::DependencyError(format!( - "Failed to spawn git submodule update: {err}" - )) - })? - .wait() - .map_err(|err| { - DeployerError::DependencyError(format!( - "Failed to wait for git submodule update: {err}" - )) - })?; - if !submodule_status.success() { - return Err(DeployerError::DependencyError(format!( - "git submodule update failed for {outdir}" - ))); - } - } - - Ok(reset_status) - } else { - trace!(repository_url = %repository_url, outdir = %outdir, branch = ?branch, "Cloning git repository"); - let mut git_cmd = Command::new("git"); - - let git_clone_cmd = git_cmd.arg("clone").arg(repository_url); - - if let Some(branch) = branch { - git_clone_cmd.arg("--branch").arg(branch); - } - - if submodules { - git_clone_cmd.arg("--recurse-submodules"); - } - - git_clone_cmd - .arg(outdir) - .spawn() - .map_err(|err| DeployerError::DependencyError(format!("Failed to spawn git: {err}")))? 
- .wait() - .map_err(|err| DeployerError::DependencyError(format!("Failed to wait for git: {err}"))) - } + ethrex_l2_sdk::download_contract_deps(&opts.contracts_path).map_err(DeployerError::from) } fn compile_contracts(opts: &DeployerOptions) -> Result<(), DeployerError> { @@ -462,9 +279,13 @@ fn deploy_tdx_contracts( .current_dir("tee/contracts") .stdout(Stdio::null()) .spawn() - .map_err(|err| DeployerError::DependencyError(format!("Failed to spawn make: {err}")))? + .map_err(|err| { + DeployerError::DeploymentSubtaskFailed(format!("Failed to spawn make: {err}")) + })? .wait() - .map_err(|err| DeployerError::DependencyError(format!("Failed to wait for make: {err}")))?; + .map_err(|err| { + DeployerError::DeploymentSubtaskFailed(format!("Failed to wait for make: {err}")) + })?; let address = read_tdx_deployment_address("TDXVerifier"); Ok(address) diff --git a/crates/l2/contracts/bin/system_contracts_updater/main.rs b/crates/l2/contracts/bin/system_contracts_updater/main.rs index ba03d18c80..4e8c70b5ce 100644 --- a/crates/l2/contracts/bin/system_contracts_updater/main.rs +++ b/crates/l2/contracts/bin/system_contracts_updater/main.rs @@ -7,7 +7,7 @@ use error::SystemContractsUpdaterError; use ethrex_common::U256; use ethrex_common::types::GenesisAccount; use ethrex_l2::utils::test_data_io::read_genesis_file; -use ethrex_l2_sdk::{COMMON_BRIDGE_L2_ADDRESS, L1_MESSENGER_ADDRESS, compile_contract}; +use ethrex_l2_sdk::{COMMON_BRIDGE_L2_ADDRESS, L2_TO_L1_MESSENGER_ADDRESS, compile_contract}; use genesis_tool::genesis::write_genesis_as_json; mod cli; mod error; @@ -15,7 +15,7 @@ mod error; fn main() -> Result<(), SystemContractsUpdaterError> { let opts = SystemContractsUpdaterOptions::parse(); compile_contract(&opts.contracts_path, "src/l2/CommonBridgeL2.sol", true)?; - compile_contract(&opts.contracts_path, "src/l2/L1Messenger.sol", true)?; + compile_contract(&opts.contracts_path, "src/l2/L2ToL1Messenger.sol", true)?; update_genesis_file(&opts.l2_genesis_path)?; Ok(()) } @@ -39,10 +39,10 @@ fn update_genesis_file(l2_genesis_path: &PathBuf) -> Result<(), SystemContractsU }, ); - let l1_messenger_runtime = std::fs::read("contracts/solc_out/L1Messenger.bin-runtime")?; + let l1_messenger_runtime = std::fs::read("contracts/solc_out/L2ToL1Messenger.bin-runtime")?; genesis.alloc.insert( - L1_MESSENGER_ADDRESS, + L2_TO_L1_MESSENGER_ADDRESS, GenesisAccount { code: Bytes::from(hex::decode(l1_messenger_runtime)?), storage: HashMap::new(), diff --git a/crates/l2/contracts/src/example/L2ERC20.sol b/crates/l2/contracts/src/example/L2ERC20.sol new file mode 100644 index 0000000000..69d9421f85 --- /dev/null +++ b/crates/l2/contracts/src/example/L2ERC20.sol @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: MIT +pragma solidity =0.8.29; + +import "@openzeppelin/contracts/token/ERC20/ERC20.sol"; +import "../l2/interfaces/IERC20L2.sol"; + +/// @title Example L2-side bridgeable token +/// @author LambdaClass +contract TestTokenL2 is ERC20, IERC20L2 { + address public L1_TOKEN = address(0); + address public constant BRIDGE = 0x000000000000000000000000000000000000FFff; + + constructor(address l1Addr) ERC20("TestTokenL2", "TEST") { + L1_TOKEN = l1Addr; + } + + modifier onlyBridge() { + require(msg.sender == BRIDGE, "TestToken: not authorized to mint"); + _; + } + + function l1Address() external view returns (address) { + return L1_TOKEN; + } + + function crosschainMint(address destination, uint256 amount) external onlyBridge { + _mint(destination, amount); + } + + function crosschainBurn(address from, uint256 value) 
external onlyBridge { + _burn(from, value); + } +} diff --git a/crates/l2/contracts/src/l1/CommonBridge.sol b/crates/l2/contracts/src/l1/CommonBridge.sol index 4a2d14ead8..f04266f3d2 100644 --- a/crates/l2/contracts/src/l1/CommonBridge.sol +++ b/crates/l2/contracts/src/l1/CommonBridge.sol @@ -5,6 +5,8 @@ import "@openzeppelin/contracts-upgradeable/proxy/utils/UUPSUpgradeable.sol"; import "@openzeppelin/contracts-upgradeable/proxy/utils/Initializable.sol"; import "@openzeppelin/contracts-upgradeable/access/Ownable2StepUpgradeable.sol"; import "@openzeppelin/contracts-upgradeable/utils/ReentrancyGuardUpgradeable.sol"; +import "@openzeppelin/contracts/token/ERC20/IERC20.sol"; +import "@openzeppelin/contracts/token/ERC20/utils/SafeERC20.sol"; import "./interfaces/ICommonBridge.sol"; import "./interfaces/IOnChainProposer.sol"; import "../l2/interfaces/ICommonBridgeL2.sol"; @@ -18,6 +20,8 @@ contract CommonBridge is Ownable2StepUpgradeable, ReentrancyGuardUpgradeable { + using SafeERC20 for IERC20; + /// @notice Mapping of unclaimed withdrawals. A withdrawal is claimed if /// there is a non-zero value in the mapping (a merkle root) for the hash /// of the L2 transaction that requested the withdrawal. @@ -50,6 +54,16 @@ contract CommonBridge is /// @dev It's used to validate withdrawals address public constant L2_BRIDGE_ADDRESS = address(0xffff); + /// @notice How much of each L1 token was deposited to each L2 token. + /// @dev Stored as L1 -> L2 -> amount + /// @dev Prevents L2 tokens from faking their L1 address and stealing tokens + /// @dev The token can take the value {ETH_TOKEN} to represent ETH + mapping(address => mapping(address => uint256)) public deposits; + + /// @notice Token address used to represent ETH + address public constant ETH_TOKEN = + 0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE; + modifier onlyOnChainProposer() { require( msg.sender == ON_CHAIN_PROPOSER, @@ -122,6 +136,7 @@ contract CommonBridge is } function _deposit(address l2Recipient) private { + deposits[ETH_TOKEN][ETH_TOKEN] += msg.value; bytes memory callData = abi.encodeCall(ICommonBridgeL2.mintETH, (l2Recipient)); SendValues memory sendValues = SendValues({ to: L2_BRIDGE_ADDRESS, @@ -136,6 +151,29 @@ contract CommonBridge is _deposit(msg.sender); } + function depositERC20( + address tokenL1, + address tokenL2, + address destination, + uint256 amount + ) external { + require(amount > 0, "CommonBridge: amount to deposit is zero"); + deposits[tokenL1][tokenL2] += amount; + IERC20(tokenL1).safeTransferFrom(msg.sender, address(this), amount); + + bytes memory callData = abi.encodeCall( + ICommonBridgeL2.mintERC20, + (tokenL1, tokenL2, destination, amount) + ); + SendValues memory sendValues = SendValues({ + to: L2_BRIDGE_ADDRESS, + gasLimit: 21000 * 5, + value: 0, + data: callData + }); + _sendToL2(L2_BRIDGE_ADDRESS, sendValues); + } + /// @inheritdoc ICommonBridge function getPendingDepositLogsVersionedHash( uint16 number @@ -207,8 +245,66 @@ contract CommonBridge is uint256 withdrawalBatchNumber, uint256 withdrawalLogIndex, bytes32[] calldata withdrawalProof + ) public { + _claimWithdrawal( + l2WithdrawalTxHash, + ETH_TOKEN, + ETH_TOKEN, + claimedAmount, + withdrawalBatchNumber, + withdrawalLogIndex, + withdrawalProof + ); + (bool success, ) = payable(msg.sender).call{value: claimedAmount}(""); + require(success, "CommonBridge: failed to send the claimed amount"); + } + + /// @inheritdoc ICommonBridge + function claimWithdrawalERC20( + bytes32 l2WithdrawalTxHash, + address tokenL1, + address tokenL2, + uint256 
claimedAmount, + uint256 withdrawalBatchNumber, + uint256 withdrawalLogIndex, + bytes32[] calldata withdrawalProof ) public nonReentrant { - bytes32 withdrawalId = keccak256(abi.encodePacked(withdrawalBatchNumber, withdrawalLogIndex)); + _claimWithdrawal( + l2WithdrawalTxHash, + tokenL1, + tokenL2, + claimedAmount, + withdrawalBatchNumber, + withdrawalLogIndex, + withdrawalProof + ); + require( + tokenL1 != ETH_TOKEN, + "CommonBridge: attempted to withdraw ETH as if it were ERC20, use claimWithdrawal()" + ); + IERC20(tokenL1).safeTransfer(msg.sender, claimedAmount); + } + + function _claimWithdrawal( + bytes32 l2WithdrawalTxHash, + address tokenL1, + address tokenL2, + uint256 claimedAmount, + uint256 withdrawalBatchNumber, + uint256 withdrawalLogIndex, + bytes32[] calldata withdrawalProof + ) private { + require( + deposits[tokenL1][tokenL2] >= claimedAmount, + "CommonBridge: trying to withdraw more tokens/ETH than were deposited" + ); + deposits[tokenL1][tokenL2] -= claimedAmount; + bytes32 msgHash = keccak256( + abi.encodePacked(tokenL1, tokenL2, msg.sender, claimedAmount) + ); + bytes32 withdrawalId = keccak256( + abi.encodePacked(withdrawalBatchNumber, withdrawalLogIndex) + ); require( batchWithdrawalLogsMerkleRoots[withdrawalBatchNumber] != bytes32(0), "CommonBridge: the batch that emitted the withdrawal logs was not committed" @@ -222,34 +318,27 @@ contract CommonBridge is claimedWithdrawals[withdrawalId] == false, "CommonBridge: the withdrawal was already claimed" ); + claimedWithdrawals[withdrawalId] = true; + emit WithdrawalClaimed(withdrawalId); require( - _verifyWithdrawProof( + _verifyMessageProof( l2WithdrawalTxHash, - claimedAmount, + msgHash, withdrawalBatchNumber, withdrawalLogIndex, withdrawalProof ), - "CommonBridge: invalid withdrawal proof" + "CommonBridge: Invalid proof" ); - - (bool success, ) = payable(msg.sender).call{value: claimedAmount}(""); - - require(success, "CommonBridge: failed to send the claimed amount"); - - claimedWithdrawals[withdrawalId] = true; - - emit WithdrawalClaimed(withdrawalId, msg.sender, claimedAmount); } - function _verifyWithdrawProof( + function _verifyMessageProof( bytes32 l2WithdrawalTxHash, - uint256 claimedAmount, + bytes32 msgHash, uint256 withdrawalBatchNumber, uint256 withdrawalLogIndex, bytes32[] calldata withdrawalProof ) internal view returns (bool) { - bytes32 msgHash = keccak256(abi.encodePacked(msg.sender, claimedAmount)); bytes32 withdrawalLeaf = keccak256( abi.encodePacked(l2WithdrawalTxHash, L2_BRIDGE_ADDRESS, msgHash) ); diff --git a/crates/l2/contracts/src/l1/interfaces/ICommonBridge.sol b/crates/l2/contracts/src/l1/interfaces/ICommonBridge.sol index 80b83bdcc7..86f5db0de9 100644 --- a/crates/l2/contracts/src/l1/interfaces/ICommonBridge.sol +++ b/crates/l2/contracts/src/l1/interfaces/ICommonBridge.sol @@ -39,13 +39,9 @@ interface ICommonBridge { /// @notice A withdrawal has been claimed. /// @dev Event emitted when a withdrawal is claimed. - /// @param l2WithdrawalTxHash the hash of the L2 withdrawal transaction. - /// @param claimee the address that claimed the withdrawal. - /// @param claimedAmount the amount that was claimed. + /// @param withdrawalId the hash of the batch and index of the withdrawal event WithdrawalClaimed( - bytes32 indexed l2WithdrawalTxHash, - address indexed claimee, - uint256 indexed claimedAmount + bytes32 indexed withdrawalId ); struct SendValues { @@ -125,10 +121,8 @@ interface ICommonBridge { /// @param l2WithdrawalTxHash the hash of the L2 withdrawal transaction. 
/// @param claimedAmount the amount that will be claimed. /// @param withdrawalProof the merkle path to the withdrawal log. - /// @param withdrawalLogIndex the index of the withdrawal log in the block. - /// This is the index of the withdraw transaction relative to the block's - /// withdrawal transctions. - /// A pseudocode would be [tx if tx is withdrawx for tx in block.txs()].index(leaf_tx). + /// @param withdrawalLogIndex the index of the message log in the block. + /// This is the index of the withdraw transaction relative to the block's messages. /// @param l2WithdrawalBatchNumber the batch number where the withdrawal log /// was emitted. function claimWithdrawal( @@ -138,4 +132,23 @@ interface ICommonBridge { uint256 withdrawalLogIndex, bytes32[] calldata withdrawalProof ) external; + + /// @notice Claims an ERC20 withdrawal + /// @param l2WithdrawalTxHash the hash of the L2 withdrawal transaction. + /// @param tokenL1 Address of the token on the L1 + /// @param tokenL2 Address of the token on the L2 + /// @param claimedAmount the amount that will be claimed. + /// @param withdrawalProof the merkle path to the withdrawal log. + /// @param withdrawalLogIndex the index of the message log in the batch. + /// @param l2WithdrawalBatchNumber the batch number where the withdrawal log + /// was emitted. + function claimWithdrawalERC20( + bytes32 l2WithdrawalTxHash, + address tokenL1, + address tokenL2, + uint256 claimedAmount, + uint256 l2WithdrawalBatchNumber, + uint256 withdrawalLogIndex, + bytes32[] calldata withdrawalProof + ) external; } diff --git a/crates/l2/contracts/src/l2/CommonBridgeL2.sol b/crates/l2/contracts/src/l2/CommonBridgeL2.sol index aa231910d0..e77cba5fdd 100644 --- a/crates/l2/contracts/src/l2/CommonBridgeL2.sol +++ b/crates/l2/contracts/src/l2/CommonBridgeL2.sol @@ -2,7 +2,8 @@ pragma solidity =0.8.29; import "./interfaces/ICommonBridgeL2.sol"; -import "./interfaces/IL1Messenger.sol"; +import "./interfaces/IL2ToL1Messenger.sol"; +import "./interfaces/IERC20L2.sol"; /// @title CommonBridge L2 contract. /// @author LambdaClass @@ -11,6 +12,14 @@ contract CommonBridgeL2 is ICommonBridgeL2 { 0x000000000000000000000000000000000000FFFE; address public constant BURN_ADDRESS = 0x0000000000000000000000000000000000000000; + /// @notice Token address used to represent ETH + address public constant ETH_TOKEN = 0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE; + + // Some calls come as a privileged transaction, whose sender is the bridge itself. 
+ modifier onlySelf() { + require(msg.sender == address(this), "CommonBridgeL2: caller is not the bridge"); + _; + } function withdraw(address _receiverOnL1) external payable { require(msg.value > 0, "Withdrawal amount must be positive"); @@ -18,15 +27,51 @@ contract CommonBridgeL2 is ICommonBridgeL2 { (bool success, ) = BURN_ADDRESS.call{value: msg.value}(""); require(success, "Failed to burn Ether"); - IL1Messenger(L1_MESSENGER).sendMessageToL1(keccak256(abi.encodePacked( + emit WithdrawalInitiated(msg.sender, _receiverOnL1, msg.value); + + IL2ToL1Messenger(L1_MESSENGER).sendMessageToL1(keccak256(abi.encodePacked( + ETH_TOKEN, + ETH_TOKEN, _receiverOnL1, msg.value ))); } + function mintETH(address to) external payable { (bool success, ) = to.call{value: msg.value}(""); if (!success) { this.withdraw{value: msg.value}(to); } + emit DepositProcessed(to, msg.value); + } + + function mintERC20(address tokenL1, address tokenL2, address destination, uint256 amount) external onlySelf { + (bool success, ) = address(this).call(abi.encodeCall(this.tryMintERC20, (tokenL1, tokenL2, destination, amount))); + if (!success) { + _withdraw(tokenL1, tokenL2, destination, amount); + } + emit ERC20DepositProcessed(tokenL1, tokenL2, destination, amount); + } + + function tryMintERC20(address tokenL1, address tokenL2, address destination, uint256 amount) external onlySelf { + IERC20L2 token = IERC20L2(tokenL2); + require(token.l1Address() == tokenL1); + token.crosschainMint(destination, amount); + } + + function withdrawERC20(address tokenL1, address tokenL2, address destination, uint256 amount) external { + require(amount > 0, "Withdrawal amount must be positive"); + IERC20L2(tokenL2).crosschainBurn(msg.sender, amount); + emit ERC20WithdrawalInitiated(tokenL1, tokenL2, destination, amount); + _withdraw(tokenL1, tokenL2, destination, amount); + } + + function _withdraw(address tokenL1, address tokenL2, address destination, uint256 amount) private { + IL2ToL1Messenger(L1_MESSENGER).sendMessageToL1(keccak256(abi.encodePacked( + tokenL1, + tokenL2, + destination, + amount + ))); } } diff --git a/crates/l2/contracts/src/l2/L1Messenger.sol b/crates/l2/contracts/src/l2/L2ToL1Messenger.sol similarity index 68% rename from crates/l2/contracts/src/l2/L1Messenger.sol rename to crates/l2/contracts/src/l2/L2ToL1Messenger.sol index c1dafe004c..37dd841c76 100644 --- a/crates/l2/contracts/src/l2/L1Messenger.sol +++ b/crates/l2/contracts/src/l2/L2ToL1Messenger.sol @@ -1,11 +1,11 @@ // SPDX-License-Identifier: MIT pragma solidity =0.8.29; -import "./interfaces/IL1Messenger.sol"; +import "./interfaces/IL2ToL1Messenger.sol"; -/// @title L1Messenger contract. +/// @title L2ToL1Messenger contract. /// @author LambdaClass -contract L1Messenger is IL1Messenger { +contract L2ToL1Messenger is IL2ToL1Messenger { function sendMessageToL1(bytes32 data) external { // This event gets pushed to L1, the sequencer monitors // them on every block. diff --git a/crates/l2/contracts/src/l2/interfaces/ICommonBridgeL2.sol b/crates/l2/contracts/src/l2/interfaces/ICommonBridgeL2.sol index 28b9236777..1330098acb 100644 --- a/crates/l2/contracts/src/l2/interfaces/ICommonBridgeL2.sol +++ b/crates/l2/contracts/src/l2/interfaces/ICommonBridgeL2.sol @@ -6,6 +6,14 @@ pragma solidity =0.8.29; /// @notice A CommonBridge contract is a contract that allows L1<->L2 communication /// It handles user withdrawals and message sending to L1. 
interface ICommonBridgeL2 { + /// @notice An ETH deposit was successfully processed + /// @dev Event emitted when an ETH deposit is processed. + /// @param receiver the address that received the ETH + /// @param amount the amount of ether being deposited + event DepositProcessed( + address indexed receiver, + uint256 amount + ); /// @notice A withdrawal to L1 has initiated. /// @dev Event emitted when a withdrawal is initiated. /// @param senderOnL2 the sender of the transaction on L2. @@ -17,6 +25,32 @@ interface ICommonBridgeL2 { uint256 indexed amount ); + + /// @notice An ERC20 token deposit was successfully processed + /// @dev Event emitted when an ERC20 deposit is processed. + /// @param tokenL1 Address of the token on L1 + /// @param tokenL2 Address of the token on L2 + /// @param receiver the address that received the tokens + /// @param amount the amount of tokens being deposited + event ERC20DepositProcessed( + address indexed tokenL1, + address indexed tokenL2, + address indexed receiver, + uint256 amount + ); + /// @notice An ERC20 token withdrawal has initiated + /// @dev Event emitted when an ERC20 withdrawal is initiated. + /// @param tokenL1 Address of the token on L1 + /// @param tokenL2 Address of the token on L2 + /// @param receiverOnL1 the address on L1 that will receive the funds back. + /// @param amount the amount of tokens being withdrawn. + event ERC20WithdrawalInitiated( + address indexed tokenL1, + address indexed tokenL2, + address indexed receiverOnL1, + uint256 amount + ); + /// @notice Initiates the withdrawal of funds to the L1. /// @dev This is the first step in the two step process of a user withdrawal. /// @dev It burns funds on L2 and sends a message to the L1 so users @@ -30,4 +64,22 @@ interface ICommonBridgeL2 { /// @dev If the transfer fails, a withdrawal is initiated. /// @param to the address to transfer the funds to function mintETH(address to) external payable; + + /// @notice Tries to deposit an ERC20 token + /// @dev The msg.sender must be the bridge itself, using a privileged transaction + /// @param tokenL1 Address of the token on L1 + /// @param tokenL2 Address of the token on L2 + /// @param destination Address that should receive the tokens + /// @param amount Amount of tokens to give + function mintERC20(address tokenL1, address tokenL2, address destination, uint256 amount) external; + + /// @notice Initiates the withdrawal of ERC20 tokens to the L1. + /// @dev This is the first step in the two step process of a user withdrawal. + /// @dev It burns tokens on L2 and sends a message to the L1 so users + /// @dev can claim those tokens on L1. + /// @param tokenL1 Address of the token on L1 + /// @param tokenL2 Address of the token on L2 + /// @param destination Address on L1 that should receive the tokens + /// @param amount Amount of tokens to withdraw + function withdrawERC20(address tokenL1, address tokenL2, address destination, uint256 amount) external; } diff --git a/crates/l2/contracts/src/l2/interfaces/IERC20L2.sol b/crates/l2/contracts/src/l2/interfaces/IERC20L2.sol new file mode 100644 index 0000000000..8bbc6d25e8 --- /dev/null +++ b/crates/l2/contracts/src/l2/interfaces/IERC20L2.sol @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: MIT +pragma solidity =0.8.29; + +import "@openzeppelin/contracts/token/ERC20/IERC20.sol"; + +/// @title Interface for an L2-capable token. 
+/// @author LambdaClass +/// @dev Uses the interface described in the ERC-7802 draft +interface IERC20L2 is IERC20 { + /// @notice Returns the address of the token on the L1 + /// @dev Used to verify token reception. + function l1Address() external returns (address); + + /// @notice Mints tokens to the given address + /// @dev Should be callable by the bridge + function crosschainMint(address to, uint256 amount) external; + + /// @notice Burns tokens from the given address + /// @dev Should be callable by the bridge + function crosschainBurn(address from, uint256 amount) external; +} diff --git a/crates/l2/contracts/src/l2/interfaces/IL1Messenger.sol b/crates/l2/contracts/src/l2/interfaces/IL2ToL1Messenger.sol similarity index 96% rename from crates/l2/contracts/src/l2/interfaces/IL1Messenger.sol rename to crates/l2/contracts/src/l2/interfaces/IL2ToL1Messenger.sol index ed6812dd42..7e5b91a952 100644 --- a/crates/l2/contracts/src/l2/interfaces/IL1Messenger.sol +++ b/crates/l2/contracts/src/l2/interfaces/IL2ToL1Messenger.sol @@ -5,7 +5,7 @@ pragma solidity =0.8.29; /// @author LambdaClass /// @notice The L1Messenger contract is a contract that allows L2->L1 communication /// It handles message sending to L1, which is used to handle withdrawals. -interface IL1Messenger { +interface IL2ToL1Messenger { /// @notice A withdrawal to L1 has initiated. /// @dev Event emitted when a withdrawal is initiated. /// @param senderOnL2 the caller on L2 diff --git a/crates/l2/sdk/src/sdk.rs b/crates/l2/sdk/src/sdk.rs index 153d88389a..5a8db8c9fe 100644 --- a/crates/l2/sdk/src/sdk.rs +++ b/crates/l2/sdk/src/sdk.rs @@ -1,4 +1,6 @@ -use std::{fs::read_to_string, path::Path, process::Command}; +use std::path::PathBuf; +use std::process::{Command, ExitStatus}; +use std::{fs::read_to_string, path::Path}; use bytes::Bytes; use calldata::encode_calldata; @@ -20,6 +22,7 @@ pub mod l1_to_l2_tx_data; pub mod merkle_tree; pub use l1_to_l2_tx_data::{L1ToL2TransactionData, send_l1_to_l2_tx}; +use tracing::{info, trace}; // 0x8ccf74999c496e4d27a2b02941673f41dd0dab2a pub const DEFAULT_BRIDGE_ADDRESS: Address = H160([ @@ -32,7 +35,7 @@ pub const COMMON_BRIDGE_L2_ADDRESS: Address = H160([ 0x00, 0x00, 0xff, 0xff, ]); -pub const L1_MESSENGER_ADDRESS: Address = H160([ +pub const L2_TO_L1_MESSENGER_ADDRESS: Address = H160([ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xfe, ]); @@ -212,6 +215,100 @@ pub async fn claim_withdraw( .await } +pub async fn claim_erc20withdraw( + token_l1: Address, + token_l2: Address, + amount: U256, + l2_withdrawal_tx_hash: H256, + from_pk: SecretKey, + eth_client: &EthClient, + message_proof: &L1MessageProof, +) -> Result { + let from = get_address_from_secret_key(&from_pk)?; + const CLAIM_WITHDRAWAL_ERC20_SIGNATURE: &str = + "claimWithdrawalERC20(bytes32,address,address,uint256,uint256,uint256,bytes32[])"; + + let calldata_values = vec![ + Value::Uint(U256::from_big_endian( + l2_withdrawal_tx_hash.as_fixed_bytes(), + )), + Value::Address(token_l1), + Value::Address(token_l2), + Value::Uint(amount), + Value::Uint(U256::from(message_proof.batch_number)), + Value::Uint(U256::from(message_proof.index)), + Value::Array( + message_proof + .merkle_proof + .iter() + .map(|v| Value::Uint(U256::from_big_endian(v.as_bytes()))) + .collect(), + ), + ]; + + let claim_withdrawal_data = + encode_calldata(CLAIM_WITHDRAWAL_ERC20_SIGNATURE, &calldata_values)?; + + println!( + "Claiming withdrawal with calldata: {}", + 
hex::encode(&claim_withdrawal_data) + ); + + let claim_tx = eth_client + .build_eip1559_transaction( + bridge_address().map_err(|err| EthClientError::Custom(err.to_string()))?, + from, + claim_withdrawal_data.into(), + Overrides { + from: Some(from), + ..Default::default() + }, + ) + .await?; + + eth_client + .send_eip1559_transaction(&claim_tx, &from_pk) + .await +} + +pub async fn deposit_erc20( + token_l1: Address, + token_l2: Address, + amount: U256, + from: Address, + from_pk: SecretKey, + eth_client: &EthClient, +) -> Result { + println!("Claiming {amount} from bridge to {from:#x}"); + + const DEPOSIT_ERC20_SIGNATURE: &str = "depositERC20(address,address,address,uint256)"; + + let calldata_values = vec![ + Value::Address(token_l1), + Value::Address(token_l2), + Value::Address(from), + Value::Uint(amount), + ]; + + let deposit_data = encode_calldata(DEPOSIT_ERC20_SIGNATURE, &calldata_values)?; + + let deposit_tx = eth_client + .build_eip1559_transaction( + bridge_address().map_err(|err| EthClientError::Custom(err.to_string()))?, + from, + deposit_data.into(), + Overrides { + from: Some(from), + ..Default::default() + }, + ) + .await?; + + eth_client + .send_eip1559_transaction(&deposit_tx, &from_pk) + .await +} + pub fn secret_key_deserializer<'de, D>(deserializer: D) -> Result where D: Deserializer<'de>, @@ -555,3 +652,194 @@ pub async fn call_contract( wait_for_transaction_receipt(tx_hash, client, 100).await?; Ok(tx_hash) } + +#[derive(Debug, thiserror::Error)] +pub enum GitError { + #[error("Failed to clone: {0}")] + DependencyError(String), + #[error("Internal error: {0}")] + InternalError(String), + #[error("Failed to get string from path")] + FailedToGetStringFromPath, +} + +pub fn git_clone( + repository_url: &str, + outdir: &str, + branch: Option<&str>, + submodules: bool, +) -> Result { + info!(repository_url = %repository_url, outdir = %outdir, branch = ?branch, "Cloning or updating git repository"); + + if PathBuf::from(outdir).join(".git").exists() { + info!(outdir = %outdir, "Found existing git repository, updating..."); + + let branch_name = if let Some(b) = branch { + b.to_string() + } else { + // Look for default branch name (could be main, master or other) + let output = Command::new("git") + .current_dir(outdir) + .arg("symbolic-ref") + .arg("refs/remotes/origin/HEAD") + .output() + .map_err(|e| { + GitError::DependencyError(format!( + "Failed to get default branch for {outdir}: {e}" + )) + })?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + return Err(GitError::DependencyError(format!( + "Failed to get default branch for {outdir}: {stderr}" + ))); + } + + String::from_utf8(output.stdout) + .map_err(|_| GitError::InternalError("Failed to parse git output".to_string()))? + .trim() + .split('/') + .next_back() + .ok_or(GitError::InternalError( + "Failed to parse default branch".to_string(), + ))? + .to_string() + }; + + trace!(branch = %branch_name, "Updating to branch"); + + // Fetch + let fetch_status = Command::new("git") + .current_dir(outdir) + .args(["fetch", "origin"]) + .spawn() + .map_err(|err| GitError::DependencyError(format!("Failed to spawn git fetch: {err}")))? 
+ .wait() + .map_err(|err| { + GitError::DependencyError(format!("Failed to wait for git fetch: {err}")) + })?; + if !fetch_status.success() { + return Err(GitError::DependencyError(format!( + "git fetch failed for {outdir}" + ))); + } + + // Checkout to branch + let checkout_status = Command::new("git") + .current_dir(outdir) + .arg("checkout") + .arg(&branch_name) + .spawn() + .map_err(|err| { + GitError::DependencyError(format!("Failed to spawn git checkout: {err}")) + })? + .wait() + .map_err(|err| { + GitError::DependencyError(format!("Failed to wait for git checkout: {err}")) + })?; + if !checkout_status.success() { + return Err(GitError::DependencyError(format!( + "git checkout of branch {branch_name} failed for {outdir}, try deleting the repo folder" + ))); + } + + // Reset branch to origin + let reset_status = Command::new("git") + .current_dir(outdir) + .arg("reset") + .arg("--hard") + .arg(format!("origin/{branch_name}")) + .spawn() + .map_err(|err| GitError::DependencyError(format!("Failed to spawn git reset: {err}")))? + .wait() + .map_err(|err| { + GitError::DependencyError(format!("Failed to wait for git reset: {err}")) + })?; + + if !reset_status.success() { + return Err(GitError::DependencyError(format!( + "git reset failed for {outdir}" + ))); + } + + // Update submodules + if submodules { + let submodule_status = Command::new("git") + .current_dir(outdir) + .arg("submodule") + .arg("update") + .arg("--init") + .arg("--recursive") + .spawn() + .map_err(|err| { + GitError::DependencyError(format!( + "Failed to spawn git submodule update: {err}" + )) + })? + .wait() + .map_err(|err| { + GitError::DependencyError(format!( + "Failed to wait for git submodule update: {err}" + )) + })?; + if !submodule_status.success() { + return Err(GitError::DependencyError(format!( + "git submodule update failed for {outdir}" + ))); + } + } + + Ok(reset_status) + } else { + trace!(repository_url = %repository_url, outdir = %outdir, branch = ?branch, "Cloning git repository"); + let mut git_cmd = Command::new("git"); + + let git_clone_cmd = git_cmd.arg("clone").arg(repository_url); + + if let Some(branch) = branch { + git_clone_cmd.arg("--branch").arg(branch); + } + + if submodules { + git_clone_cmd.arg("--recurse-submodules"); + } + + git_clone_cmd + .arg(outdir) + .spawn() + .map_err(|err| GitError::DependencyError(format!("Failed to spawn git: {err}")))? 
+ .wait() + .map_err(|err| GitError::DependencyError(format!("Failed to wait for git: {err}"))) + } +} + +pub fn download_contract_deps(contracts_path: &Path) -> Result<(), GitError> { + trace!("Downloading contract dependencies"); + std::fs::create_dir_all(contracts_path.join("lib")).map_err(|err| { + GitError::DependencyError(format!("Failed to create contracts/lib: {err}")) + })?; + + git_clone( + "https://github.com/OpenZeppelin/openzeppelin-contracts-upgradeable.git", + contracts_path + .join("lib/openzeppelin-contracts-upgradeable") + .to_str() + .ok_or(GitError::FailedToGetStringFromPath)?, + None, + true, + )?; + + git_clone( + "https://github.com/succinctlabs/sp1-contracts.git", + contracts_path + .join("lib/sp1-contracts") + .to_str() + .ok_or(GitError::FailedToGetStringFromPath)?, + None, + false, + )?; + + trace!("Contract dependencies downloaded"); + Ok(()) +} diff --git a/crates/l2/tests/tests.rs b/crates/l2/tests/tests.rs index 377ebd86f7..25ec1d0610 100644 --- a/crates/l2/tests/tests.rs +++ b/crates/l2/tests/tests.rs @@ -6,7 +6,11 @@ use ethrex_common::{H160, types::BlockNumber}; use ethrex_l2_common::calldata::Value; use ethrex_l2_sdk::calldata::{self}; use ethrex_l2_sdk::l1_to_l2_tx_data::L1ToL2TransactionData; -use ethrex_l2_sdk::{get_address_from_secret_key, wait_for_transaction_receipt}; +use ethrex_l2_sdk::{ + COMMON_BRIDGE_L2_ADDRESS, claim_erc20withdraw, compile_contract, deposit_erc20, + get_address_from_secret_key, wait_for_transaction_receipt, +}; +use ethrex_rpc::clients::eth::from_hex_string_to_u256; use ethrex_rpc::clients::eth::{BlockByNumber, EthClient, eth_sender::Overrides}; use ethrex_rpc::types::receipt::RpcReceipt; use hex::FromHexError; @@ -14,7 +18,7 @@ use keccak_hash::{H256, keccak}; use secp256k1::SecretKey; use std::fs::{File, read_to_string}; use std::io::{BufRead, BufReader}; -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use std::{ops::Mul, str::FromStr, time::Duration}; /// Test the full flow of depositing, depositing with contract call, transferring, and withdrawing funds @@ -111,6 +115,8 @@ async fn l2_integration_test() -> Result<(), Box> { test_deposit_not_enough_balance(&transfer_return_private_key, ð_client, &proposer_client) .await?; + test_erc20_roundtrip(bridge_address, &proposer_client, ð_client).await?; + let withdrawals_count = std::env::var("INTEGRATION_TEST_WITHDRAW_COUNT") .map(|amount| amount.parse().expect("Invalid withdrawal amount value")) .unwrap_or(5); @@ -243,6 +249,181 @@ async fn test_deposit_with_contract_call_revert( Ok(()) } +async fn test_erc20_roundtrip( + bridge_address: Address, + l2_client: &EthClient, + l1_client: &EthClient, +) -> Result<(), Box> { + let token_amount: U256 = U256::from(100); + + let rich_wallet_private_key = l1_rich_wallet_private_key(); + let rich_address = ethrex_l2_sdk::get_address_from_secret_key(&rich_wallet_private_key) + .expect("Failed to get address"); + + let init_code_l1 = hex::decode(std::fs::read( + "../../fixtures/contracts/ERC20/ERC20.bin/TestToken.bin", + )?)?; + let token_l1 = test_deploy_l1(&init_code_l1, &rich_wallet_private_key, l1_client).await?; + + let contracts_path = Path::new("contracts"); + ethrex_l2_sdk::download_contract_deps(contracts_path)?; + compile_contract(contracts_path, "src/example/L2ERC20.sol", false)?; + let init_code_l2_inner = hex::decode(String::from_utf8(std::fs::read( + "contracts/solc_out/TestTokenL2.bin", + )?)?)?; + let init_code_l2 = [ + init_code_l2_inner, + vec![0u8; 12], + token_l1.to_fixed_bytes().to_vec(), + ] + .concat(); + let 
token_l2 = test_deploy(&init_code_l2, &rich_wallet_private_key, l2_client).await?; + + println!("token l1={token_l1:x}, l2={token_l2:x}"); + test_send( + l1_client, + &rich_wallet_private_key, + token_l1, + "freeMint()", + &[], + ) + .await; + test_send( + l1_client, + &rich_wallet_private_key, + token_l1, + "approve(address,uint256)", + &[Value::Address(bridge_address), Value::Uint(token_amount)], + ) + .await; + let initial_balance = test_balance_of(l1_client, token_l1, rich_address).await; + let deposit_tx = deposit_erc20( + token_l1, + token_l2, + token_amount, + rich_address, + rich_wallet_private_key, + l1_client, + ) + .await + .unwrap(); + let res = wait_for_transaction_receipt(deposit_tx, l1_client, 10) + .await + .unwrap(); + wait_for_l2_deposit_receipt(res.block_info.block_number, l1_client, l2_client) + .await + .unwrap(); + let remaining_l1_balance = test_balance_of(l1_client, token_l1, rich_address).await; + let l2_balance = test_balance_of(l2_client, token_l2, rich_address).await; + assert_eq!(initial_balance - remaining_l1_balance, token_amount); + assert_eq!(l2_balance, token_amount); + + test_send( + l2_client, + &rich_wallet_private_key, + token_l2, + "approve(address,uint256)", + &[ + Value::Address(COMMON_BRIDGE_L2_ADDRESS), + Value::Uint(token_amount), + ], + ) + .await; + let res = test_send( + l2_client, + &rich_wallet_private_key, + COMMON_BRIDGE_L2_ADDRESS, + "withdrawERC20(address,address,address,uint256)", + &[ + Value::Address(token_l1), + Value::Address(token_l2), + Value::Address(rich_address), + Value::Uint(token_amount), + ], + ) + .await; + let proof = l2_client + .wait_for_message_proof(res.tx_info.transaction_hash, 1000) + .await; + let proof = proof.unwrap().into_iter().next().expect("proof not found"); + + let on_chain_proposer_address = Address::from_str( + &std::env::var("ETHREX_COMMITTER_ON_CHAIN_PROPOSER_ADDRESS") + .expect("ETHREX_COMMITTER_ON_CHAIN_PROPOSER_ADDRESS env var not set"), + ) + .unwrap(); + while l1_client + .get_last_verified_batch(on_chain_proposer_address) + .await + .unwrap() + < proof.batch_number + { + println!("Withdrawal is not verified on L1 yet"); + tokio::time::sleep(Duration::from_secs(2)).await; + } + + let withdraw_claim_tx = claim_erc20withdraw( + token_l1, + token_l2, + token_amount, + res.tx_info.transaction_hash, + rich_wallet_private_key, + l1_client, + &proof, + ) + .await + .expect("error while claiming"); + wait_for_transaction_receipt(withdraw_claim_tx, l1_client, 5).await?; + let l1_final_balance = test_balance_of(l1_client, token_l1, rich_address).await; + let l2_final_balance = test_balance_of(l2_client, token_l2, rich_address).await; + assert_eq!(initial_balance, l1_final_balance); + assert!(l2_final_balance.is_zero()); + Ok(()) +} + +async fn test_balance_of(client: &EthClient, token: Address, user: Address) -> U256 { + let res = client + .call( + token, + ethrex_l2_sdk::calldata::encode_calldata("balanceOf(address)", &[Value::Address(user)]) + .unwrap() + .into(), + Default::default(), + ) + .await + .unwrap(); + from_hex_string_to_u256(&res).unwrap() +} + +async fn test_send( + client: &EthClient, + private_key: &SecretKey, + to: Address, + signature: &str, + data: &[Value], +) -> RpcReceipt { + let caller_address = + ethrex_l2_sdk::get_address_from_secret_key(private_key).expect("Failed to get address"); + let tx = client + .build_eip1559_transaction( + to, + caller_address, + ethrex_l2_sdk::calldata::encode_calldata(signature, data) + .unwrap() + .into(), + Default::default(), + ) + .await + .unwrap(); 
+ let tx_hash = client + .send_eip1559_transaction(&tx, private_key) + .await + .unwrap(); + ethrex_l2_sdk::wait_for_transaction_receipt(tx_hash, client, 10) + .await + .unwrap() +} + async fn test_deposit( depositor_private_key: &SecretKey, bridge_address: Address, @@ -681,24 +862,16 @@ async fn test_n_withdraws( proofs.push(withdrawal_proof); } + let on_chain_proposer_address = Address::from_str( + &std::env::var("ETHREX_COMMITTER_ON_CHAIN_PROPOSER_ADDRESS") + .expect("ETHREX_COMMITTER_ON_CHAIN_PROPOSER_ADDRESS env var not set"), + ) + .unwrap(); for proof in &proofs { - while u64::from_str_radix( - eth_client - .call( - Address::from_str( - &std::env::var("ETHREX_COMMITTER_ON_CHAIN_PROPOSER_ADDRESS") - .expect("ETHREX_COMMITTER_ON_CHAIN_PROPOSER_ADDRESS env var not set"), - ) - .unwrap(), - calldata::encode_calldata("lastVerifiedBatch()", &[])?.into(), - Overrides::default(), - ) - .await? - .get(2..) - .unwrap(), - 16, - ) - .unwrap() + while eth_client + .get_last_verified_batch(on_chain_proposer_address) + .await + .unwrap() < proof.batch_number { println!("Withdrawal is not verified on L1 yet"); @@ -865,6 +1038,29 @@ async fn test_deploy( Ok(contract_address) } +async fn test_deploy_l1( + init_code: &[u8], + private_key: &SecretKey, + client: &EthClient, +) -> Result> { + println!("Deploying contract on L1"); + + let deployer_address = ethrex_l2_sdk::get_address_from_secret_key(private_key)?; + + let (deploy_tx_hash, contract_address) = client + .deploy( + deployer_address, + *private_key, + init_code.to_vec().into(), + Overrides::default(), + ) + .await?; + + ethrex_l2_sdk::wait_for_transaction_receipt(deploy_tx_hash, client, 5).await?; + + Ok(contract_address) +} + async fn test_call_to_contract_with_deposit( deployed_contract_address: Address, calldata_to_contract: Bytes, diff --git a/fixtures/genesis/l2.json b/fixtures/genesis/l2.json index f177b48cd2..74b9029fa4 100644 --- a/fixtures/genesis/l2.json +++ b/fixtures/genesis/l2.json @@ -41,16 +41,16 @@ "coinbase": "0x0000000000000000000000000000000000000000", "alloc": { "0x000000000000000000000000000000000000fffe": { - "code": "0x60806040526004361015610013575b6100bb565b61001d5f3561002b565b62cffbe50361000e57610088565b60e01c90565b60405190565b5f80fd5b5f80fd5b90565b61004b8161003f565b0361005257565b5f80fd5b9050359061006382610042565b565b9060208282031261007e5761007b915f01610056565b90565b61003b565b5f0190565b346100b6576100a061009b366004610065565b61010d565b6100a8610031565b806100b281610083565b0390f35b610037565b5f80fd5b60018060a01b031690565b90565b6100e16100dc6100e6926100bf565b6100ca565b6100bf565b90565b6100f2906100cd565b90565b6100fe906100e9565b90565b61010a9061003f565b90565b339061014261013c7fe2eb319166f66bdc0da4ccabd81814f7d4e5689f7860243bbfeb3a7d461421f5936100f5565b91610101565b9161014b610031565b8061015581610083565b0390a356fea2646970667358221220af3ee29b80280058a52bf7fad84cc3180c29067567a1eb33cefbc4aef6278f3664736f6c634300081d0033", + "code": 
"0x60806040526004361015610013575b6100bb565b61001d5f3561002b565b62cffbe50361000e57610088565b60e01c90565b60405190565b5f80fd5b5f80fd5b90565b61004b8161003f565b0361005257565b5f80fd5b9050359061006382610042565b565b9060208282031261007e5761007b915f01610056565b90565b61003b565b5f0190565b346100b6576100a061009b366004610065565b61010d565b6100a8610031565b806100b281610083565b0390f35b610037565b5f80fd5b60018060a01b031690565b90565b6100e16100dc6100e6926100bf565b6100ca565b6100bf565b90565b6100f2906100cd565b90565b6100fe906100e9565b90565b61010a9061003f565b90565b339061014261013c7fe2eb319166f66bdc0da4ccabd81814f7d4e5689f7860243bbfeb3a7d461421f5936100f5565b91610101565b9161014b610031565b8061015581610083565b0390a356fea2646970667358221220f78418bd909dc7b09815aacd7f56e47c5a54cbecef0d36d9bc84e188c027794f64736f6c634300081d0033", "storage": {}, "balance": "0x0", "nonce": "0x1" }, "0x000000000000000000000000000000000000ffff": { - "code": "0x60806040526004361015610013575b6101da565b61001d5f3561005c565b806351cff8d9146100575780637e1233a914610052578063b0f4d3951461004d5763fccc28130361000e576101a5565b61016d565b610138565b6100c9565b60e01c90565b60405190565b5f80fd5b60018060a01b031690565b6100809061006c565b90565b61008c81610077565b0361009357565b5f80fd5b905035906100a482610083565b565b906020828203126100bf576100bc915f01610097565b90565b610068565b5f0190565b6100dc6100d73660046100a6565b610528565b6100e4610062565b806100ee816100c4565b0390f35b5f80fd5b5f91031261010057565b610068565b61fffe90565b610113610105565b90565b61011f90610077565b9052565b9190610136905f60208501940190610116565b565b34610168576101483660046100f6565b61016461015361010b565b61015b610062565b91829182610123565b0390f35b6100f2565b61018061017b3660046100a6565b61065c565b610188610062565b80610192816100c4565b0390f35b5f90565b6101a2610196565b90565b346101d5576101b53660046100f6565b6101d16101c061019a565b6101c8610062565b91829182610123565b0390f35b6100f2565b5f80fd5b90565b90565b90565b6101fb6101f6610200926101e1565b6101e4565b6101de565b90565b60209181520190565b60207f7665000000000000000000000000000000000000000000000000000000000000917f5769746864726177616c20616d6f756e74206d75737420626520706f736974695f8201520152565b6102666022604092610203565b61026f8161020c565b0190565b6102889060208101905f818303910152610259565b90565b1561029257565b61029a610062565b62461bcd60e51b8152806102b060048201610273565b0390fd5b905090565b6102c45f80926102b4565b0190565b6102d1906102b9565b90565b601f801991011690565b634e487b7160e01b5f52604160045260245ffd5b906102fc906102d4565b810190811067ffffffffffffffff82111761031657604052565b6102de565b9061032e610327610062565b92836102f2565b565b67ffffffffffffffff811161034e5761034a6020916102d4565b0190565b6102de565b9061036561036083610330565b61031b565b918252565b606090565b3d5f1461038a5761037f3d610353565b903d5f602084013e5b565b61039261036a565b90610388565b5f7f4661696c656420746f206275726e204574686572000000000000000000000000910152565b6103cc6014602092610203565b6103d581610398565b0190565b6103ee9060208101905f8183039101526103bf565b90565b156103f857565b610400610062565b62461bcd60e51b815280610416600482016103d9565b0390fd5b61042e6104296104339261006c565b6101e4565b61006c565b90565b61043f9061041a565b90565b61044b90610436565b90565b6104579061041a565b90565b6104639061044e565b90565b60601b90565b61047590610466565b90565b6104819061046c565b90565b61049061049591610077565b610478565b9052565b90565b6104a86104ad916101de565b610499565b9052565b6014816104c46104cc9360209695610484565b01809261049c565b0190565b60200190565b5190565b5f80fd5b60e01b90565b5f9103126104ee57565b610068565b90565b6104ff906104f3565b9052565b9190610516905f602085019401906104f6565b565b610520610062565b3d5f823e3d9
0fd5b6105443461053e6105385f6101e7565b916101de565b1161028b565b6105775f80610551610196565b3461055a610062565b9081610565816102c8565b03925af161057161036f565b506103f1565b61058f61058a610585610105565b610442565b61045a565b9062cffbe5906105c034916105b16105a5610062565b938492602084016104b1565b602082018103825203826102f2565b6105d26105cc826104d6565b916104d0565b20823b1561064657610603926105f85f80946105ec610062565b968795869485936104de565b835260048301610503565b03925af1801561064157610615575b50565b610634905f3d811161063a575b61062c81836102f2565b8101906104e4565b5f610612565b503d610622565b610518565b6104da565b151590565b6106599061044e565b90565b6106895f80833461066b610062565b9081610676816102c8565b03925af161068261036f565b501561064b565b610691575b50565b61069a30610650565b906351cff8d9349290929190803b1561071a576106ca5f936106d5956106be610062565b968795869485936104de565b835260048301610123565b03925af18015610715576106e9575b61068e565b610708905f3d811161070e575b61070081836102f2565b8101906104e4565b5f6106e4565b503d6106f6565b610518565b6104da56fea2646970667358221220a13a7739cafa3f04dd0c21f040f57dacaebad36941d9985768690fdf10f8bf1264736f6c634300081d0033", + "code": "0x60806040526004361015610013575b610383565b61001d5f3561009c565b806351cff8d91461009757806358bc83371461009257806379204fe01461008d57806379c0cdef146100885780637e1233a914610083578063b0f4d3951461007e578063d23061db146100795763fccc28130361000e5761034e565b610308565b6102df565b6102aa565b610262565b61022b565b61018a565b610109565b60e01c90565b60405190565b5f80fd5b60018060a01b031690565b6100c0906100ac565b90565b6100cc816100b7565b036100d357565b5f80fd5b905035906100e4826100c3565b565b906020828203126100ff576100fc915f016100d7565b90565b6100a8565b5f0190565b61011c6101173660046100e6565b610710565b6101246100a2565b8061012e81610104565b0390f35b5f80fd5b5f91031261014057565b6100a8565b73eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee90565b610165610145565b90565b610171906100b7565b9052565b9190610188905f60208501940190610168565b565b346101ba5761019a366004610136565b6101b66101a561015d565b6101ad6100a2565b91829182610175565b0390f35b610132565b90565b6101cb816101bf565b036101d257565b5f80fd5b905035906101e3826101c2565b565b608081830312610226576101fb825f83016100d7565b9261022361020c84602085016100d7565b9361021a81604086016100d7565b936060016101d6565b90565b6100a8565b3461025d5761024761023e3660046101e5565b92919091610af7565b61024f6100a2565b8061025981610104565b0390f35b610132565b346102945761027e6102753660046101e5565b92919091610ce9565b6102866100a2565b8061029081610104565b0390f35b610132565b61fffe90565b6102a7610299565b90565b346102da576102ba366004610136565b6102d66102c561029f565b6102cd6100a2565b91829182610175565b0390f35b610132565b6102f26102ed3660046100e6565b610cf7565b6102fa6100a2565b8061030481610104565b0390f35b3461033a5761032461031b3660046101e5565b92919091610dfe565b61032c6100a2565b8061033681610104565b0390f35b610132565b5f90565b61034b61033f565b90565b3461037e5761035e366004610136565b61037a610369610343565b6103716100a2565b91829182610175565b0390f35b610132565b5f80fd5b90565b90565b6103a161039c6103a692610387565b61038a565b6101bf565b90565b60209181520190565b60207f7665000000000000000000000000000000000000000000000000000000000000917f5769746864726177616c20616d6f756e74206d75737420626520706f736974695f8201520152565b61040c60226040926103a9565b610415816103b2565b0190565b61042e9060208101905f8183039101526103ff565b90565b1561043857565b6104406100a2565b62461bcd60e51b81528061045660048201610419565b0390fd5b905090565b61046a5f809261045a565b0190565b6104779061045f565b90565b601f801991011690565b634e487b7160e01b5f52604160045260245ffd5b906104a29061047a565b810190811067ffffffffffffffff8211176104bc57604052
565b610484565b906104d46104cd6100a2565b9283610498565b565b67ffffffffffffffff81116104f4576104f060209161047a565b0190565b610484565b9061050b610506836104d6565b6104c1565b918252565b606090565b3d5f14610530576105253d6104f9565b903d5f602084013e5b565b610538610510565b9061052e565b5f7f4661696c656420746f206275726e204574686572000000000000000000000000910152565b61057260146020926103a9565b61057b8161053e565b0190565b6105949060208101905f818303910152610565565b90565b1561059e57565b6105a66100a2565b62461bcd60e51b8152806105bc6004820161057f565b0390fd5b6105d46105cf6105d9926100ac565b61038a565b6100ac565b90565b6105e5906105c0565b90565b6105f1906105dc565b90565b61060861060361060d926101bf565b61038a565b6101bf565b90565b610619906105c0565b90565b61062590610610565b90565b610631906105dc565b90565b60601b90565b61064390610634565b90565b61064f9061063a565b90565b61065e610663916100b7565b610646565b9052565b90565b61067661067b916101bf565b610667565b9052565b926106ac60146106b4946106a4828861069c60209b9a8399610652565b018092610652565b018092610652565b01809261066a565b0190565b60200190565b5190565b5f80fd5b60e01b90565b5f9103126106d657565b6100a8565b90565b6106e7906106db565b9052565b91906106fe905f602085019401906106de565b565b6107086100a2565b3d5f823e3d90fd5b61072c346107266107205f61038d565b916101bf565b11610431565b61075f5f8061073961033f565b346107426100a2565b908161074d8161046e565b03925af1610759610515565b50610597565b3381349061079f6107996107937fbb2689ff876f7ef453cf8865dde5ab10349d222e2e1383c5152fbdb083f02da2956105e8565b926105e8565b926105f4565b926107a86100a2565b806107b281610104565b0390a46107cd6107c86107c3610299565b61061c565b610628565b9062cffbe59061080f6107de610145565b6108006107e9610145565b9334906107f46100a2565b9586946020860161067f565b60208201810382520382610498565b61082161081b826106be565b916106b8565b20823b1561089557610852926108475f809461083b6100a2565b968795869485936106c6565b8352600483016106eb565b03925af1801561089057610864575b50565b610883905f3d8111610889575b61087b8183610498565b8101906106cc565b5f610861565b503d610871565b610700565b6106c2565b6108a3906105dc565b90565b60207f6520627269646765000000000000000000000000000000000000000000000000917f436f6d6d6f6e4272696467654c323a2063616c6c6572206973206e6f742074685f8201520152565b61090060286040926103a9565b610909816108a6565b0190565b6109229060208101905f8183039101526108f3565b90565b1561092c57565b6109346100a2565b62461bcd60e51b81528061094a6004820161090d565b0390fd5b9061097e9392916109793361097361096d6109683061089a565b6100b7565b916100b7565b14610925565b610a16565b565b63ffffffff1690565b63ffffffff60e01b1690565b6109a96109a46109ae92610980565b6106c6565b610989565b90565b6109ba906101bf565b9052565b6109f36109fa946109e96060949897956109df608086019a5f870190610168565b6020850190610168565b6040830190610168565b01906109b1565b565b151590565b9190610a14905f602085019401906109b1565b565b92919092610a855f80610a283061089a565b6004610a6c6379c0cdef610a5d88918b8d610a448d9293610995565b94610a4d6100a2565b97889660208801908152016109be565b60208201810382520382610498565b82602082019151925af1610a7e610515565b50156109fc565b610ae4575b92909192610adf610acd610ac7610ac17ff5353a2477e10b23280de25ca6cea55c17bb48000d8807ee631e514080e7fb4e946105e8565b946105e8565b946105e8565b94610ad66100a2565b91829182610a01565b0390a4565b610af2818584908692610f16565b610a8a565b90610b0393929161094e565b565b90610b35939291610b3033610b2a610b24610b1f3061089a565b6100b7565b916100b7565b14610925565b610bb6565b565b610b40906105c0565b90565b610b4c90610b37565b90565b610b58906105dc565b90565b90505190610b68826100c3565b565b90602082820312610b8357610b80915f01610b5b565b90565b6100a8565b15610b8f57565b5f80fd5b916020610bb4929493610bad60408201965f8301906101685
65b01906109b1565b565b90610bc090610b43565b610be46020610bce83610b4f565b63c2eeeebd90610bdc6100a2565b9384926106c6565b8252815f81610bf560048201610104565b03925af18015610ce457610c2c93610c27925f92610cac575b50610c1b610c21916100b7565b916100b7565b14610b88565b610b4f565b916318bf5077919092803b15610ca757610c595f8094610c64610c4d6100a2565b978896879586946106c6565b845260048401610b93565b03925af18015610ca257610c76575b50565b610c95905f3d8111610c9b575b610c8d8183610498565b8101906106cc565b5f610c73565b503d610c83565b610700565b6106c2565b610c21919250610cd5610c1b9160203d8111610cdd575b610ccd8183610498565b810190610b6a565b929150610c0e565b503d610cc3565b610700565b90610cf5939291610b05565b565b610d245f808334610d066100a2565b9081610d118161046e565b03925af1610d1d610515565b50156109fc565b610d6f575b3490610d6a610d587f85a190caa61692b36b63a55e069330d18ab9af179fed7a25c16a4262bc63b7d2926105e8565b92610d616100a2565b91829182610a01565b0390a2565b610d783061089a565b6351cff8d934919091908390803b15610df957610da85f93610db395610d9c6100a2565b968795869485936106c6565b835260048301610175565b03925af18015610df457610dc8575b50610d29565b610de7905f3d8111610ded575b610ddf8183610498565b8101906106cc565b5f610dc2565b503d610dd5565b610700565b6106c2565b929091610e1d82610e17610e115f61038d565b916101bf565b11610431565b610e2e610e2984610b43565b610b4f565b93632b8c49e3338496803b15610f1157610e5b5f8094610e66610e4f6100a2565b9b8c96879586946106c6565b845260048401610b93565b03925af1948515610f0c57610ede95610ee0575b50808483908592610ed2610ec0610eba610eb47f54538b93c6e9b3f518076db2d896122f653fac2bb32fa0b6bc75097b9f332e75946105e8565b946105e8565b946105e8565b94610ec96100a2565b91829182610a01565b0390a492909192610f16565b565b610eff905f3d8111610f05575b610ef78183610498565b8101906106cc565b5f610e7a565b503d610eed565b610700565b6106c2565b9190610f6290610f53610f37610f32610f2d610299565b61061c565b610628565b9562cffbe5959294610f476100a2565b9586946020860161067f565b60208201810382520382610498565b610f74610f6e826106be565b916106b8565b20823b15610fe857610fa592610f9a5f8094610f8e6100a2565b968795869485936106c6565b8352600483016106eb565b03925af18015610fe357610fb7575b50565b610fd6905f3d8111610fdc575b610fce8183610498565b8101906106cc565b5f610fb4565b503d610fc4565b610700565b6106c256fea264697066735822122021094d46fd2b6014fcc19d0fe48d1c311b59052f06dc38da5cc017ceb3e86f0964736f6c634300081d0033", "storage": {}, "balance": "0x0", "nonce": "0x1" } } -} +} \ No newline at end of file From 014b69088d18845aa830f4ab485ba12aee6a5dbe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20I=C3=B1aki=20Bilbao?= Date: Mon, 30 Jun 2025 16:52:03 -0300 Subject: [PATCH 28/40] feat(l2): add instance info to Grafana alerts (#3333) **Motivation** We need to easily differentiate between environments when alerts come up (staging-1, staging-2, etc.). **Description** Add an "$INSTANCE" variable in the Slack contact point so it's over-ridden with an env var. 
--- .../metrics/docker-compose-metrics-alerts.overrides.yaml | 1 + .../grafana_provisioning/alerting/contact_points.json | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/blockchain/metrics/docker-compose-metrics-alerts.overrides.yaml b/crates/blockchain/metrics/docker-compose-metrics-alerts.overrides.yaml index f3ce9507df..e77e216fe9 100644 --- a/crates/blockchain/metrics/docker-compose-metrics-alerts.overrides.yaml +++ b/crates/blockchain/metrics/docker-compose-metrics-alerts.overrides.yaml @@ -5,3 +5,4 @@ services: environment: - ALERTS_SLACK_TOKEN=${GRAFANA_SLACK_TOKEN:?Slack token is needed for alerts} - ALERTS_SLACK_CHANNEL=${GRAFANA_SLACK_CHANNEL:?Slack channel is needed for alerts} + - INSTANCE=${GRAFANA_INSTANCE:-docker} diff --git a/crates/blockchain/metrics/provisioning/grafana_provisioning/alerting/contact_points.json b/crates/blockchain/metrics/provisioning/grafana_provisioning/alerting/contact_points.json index e27a4df8b7..1e1636bb9a 100644 --- a/crates/blockchain/metrics/provisioning/grafana_provisioning/alerting/contact_points.json +++ b/crates/blockchain/metrics/provisioning/grafana_provisioning/alerting/contact_points.json @@ -10,7 +10,7 @@ "type": "slack", "settings": { "recipient": "${ALERTS_SLACK_CHANNEL}", - "text": "{{ define \"slack.body\" -}}\n{{ .CommonAnnotations.description }}\n{{- end }}\n{{ template \"slack.body\" . }}", + "text": "{{ define \"slack.body\" -}}\n[$INSTANCE]\n{{ .CommonAnnotations.description }}\n{{- end }}\n{{ template \"slack.body\" . }}", "title": "{{ define \"slack.title\" -}}{{- if eq .Status \"firing\" -}}🚨{{- else -}}✅ [SOLVED]{{- end }} {{ .CommonAnnotations.summary }}{{- end }}{{ template \"slack.title\" . }}", "token": "${ALERTS_SLACK_TOKEN}" }, From fde712a46694e07e0153c25a0593ffdf4346352e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Avila=20Gast=C3=B3n?= <72628438+avilagaston9@users.noreply.github.com> Date: Mon, 30 Jun 2025 17:59:41 -0300 Subject: [PATCH 29/40] feat(l2): exchange commit hash in node-prover communication (#3339) **Motivation** We want to prevent a divergence between the code that is running in the L2 node and the prover. **Description** - Updates the client version to use `GIT_BRANCH` and `GIT_SHA` instead of `RUSTC_COMMIT_HASH`. - Adds a `build.rs` script for both the node and prover, using `vergen_git2` to export the git env vars. - Adds a `code_version` field to the `BatchRequest` message. - Introduces a new `ProofData` message: `InvalidCodeVersion`. You can create an empty commit with: ```bash git commit --allow-empty -m "empty commit" ``` Then run the node and the prover using different commits. 
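
For reference, the handshake added here boils down to a commit-hash comparison on the coordinator side before any batch data is handed out. The sketch below is a minimal, self-contained illustration of that control flow only; the enum and `handle_batch_request` helper are simplified stand-ins for the real `ProofData` type and `handle_request` in `proof_coordinator.rs`, not the exact implementation:

```rust
// Minimal sketch of the commit-hash handshake (stand-in types, not the real ones).
// It only illustrates the control flow this patch introduces: a BatchRequest that
// carries the prover's commit hash, answered with InvalidCodeVersion on mismatch.

#[derive(Debug, PartialEq)]
enum ProofData {
    BatchRequest { commit_hash: String },
    InvalidCodeVersion { commit_hash: String },
    BatchResponse { batch_number: Option<u64> },
}

// Coordinator side: refuse to serve provers built from a different commit.
fn handle_batch_request(server_commit: &str, request: ProofData) -> ProofData {
    match request {
        ProofData::BatchRequest { commit_hash } if commit_hash == server_commit => {
            // Versions match: hand back the next batch to prove.
            ProofData::BatchResponse { batch_number: Some(1) }
        }
        ProofData::BatchRequest { .. } => ProofData::InvalidCodeVersion {
            // Versions differ: tell the prover which commit the server runs.
            commit_hash: server_commit.to_string(),
        },
        other => other,
    }
}

fn main() {
    // A prover built from a different commit learns the server's version and can bail out.
    let response = handle_batch_request(
        "fde712a",
        ProofData::BatchRequest { commit_hash: "0a1b2c3".into() },
    );
    assert_eq!(
        response,
        ProofData::InvalidCodeVersion { commit_hash: "fde712a".into() }
    );
}
```

In the actual code the server-side hash comes from `VERGEN_GIT_SHA`, exported at build time by the new `build.rs` scripts, which is why the prover binary has to be rebuilt whenever the commit changes.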
> [!WARNING] > Remember to run `make build-prover` whenever you change the commit Closes #3311 --- Cargo.lock | 67 ++++++++++++- cmd/ethrex/Cargo.toml | 3 +- cmd/ethrex/build.rs | 19 +++- cmd/ethrex/utils.rs | 5 +- crates/l2/Cargo.toml | 3 + crates/l2/build.rs | 13 +++ crates/l2/prover/Cargo.toml | 4 + crates/l2/prover/build.rs | 13 +++ crates/l2/prover/src/prover.rs | 45 ++++++--- crates/l2/sequencer/proof_coordinator.rs | 106 ++++++++++++-------- crates/l2/tee/quote-gen/Cargo.lock | 119 +++++++++++++++++++++++ crates/l2/tee/quote-gen/Cargo.toml | 5 + crates/l2/tee/quote-gen/build.rs | 13 +++ crates/l2/tee/quote-gen/src/main.rs | 31 +++--- crates/l2/tee/quote-gen/src/sender.rs | 16 ++- 15 files changed, 380 insertions(+), 82 deletions(-) create mode 100644 crates/l2/build.rs create mode 100644 crates/l2/prover/build.rs create mode 100644 crates/l2/tee/quote-gen/build.rs diff --git a/Cargo.lock b/Cargo.lock index 8e4593730c..8e6b2ab8c3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3877,7 +3877,7 @@ dependencies = [ "tokio-util", "tracing", "tracing-subscriber 0.3.19", - "vergen", + "vergen-git2", ] [[package]] @@ -3989,6 +3989,7 @@ dependencies = [ "tokio", "tokio-util", "tracing", + "vergen-git2", "zkvm_interface", ] @@ -4140,6 +4141,7 @@ dependencies = [ "tokio-util", "tracing", "tracing-subscriber 0.3.19", + "vergen-git2", "zkvm_interface", ] @@ -4922,6 +4924,19 @@ version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" +[[package]] +name = "git2" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2deb07a133b1520dc1a5690e9bd08950108873d7ed5de38dcc74d3b5ebffa110" +dependencies = [ + "bitflags 2.9.1", + "libc", + "libgit2-sys", + "log", + "url", +] + [[package]] name = "glam" version = "0.20.5" @@ -6206,6 +6221,18 @@ version = "0.2.174" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" +[[package]] +name = "libgit2-sys" +version = "0.18.2+1.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c42fe03df2bd3c53a3a9c7317ad91d80c81cd1fb0caec8d7cc4cd2bfa10c222" +dependencies = [ + "cc", + "libc", + "libz-sys", + "pkg-config", +] + [[package]] name = "libloading" version = "0.8.8" @@ -6401,6 +6428,18 @@ dependencies = [ "escape8259", ] +[[package]] +name = "libz-sys" +version = "1.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b70e7a7df205e92a1a4cd9aaae7898dac0aa555503cc0a649494d0d60e7651d" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + [[package]] name = "linux-raw-sys" version = "0.4.15" @@ -7007,6 +7046,15 @@ dependencies = [ "syn 2.0.104", ] +[[package]] +name = "num_threads" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9" +dependencies = [ + "libc", +] + [[package]] name = "number_prefix" version = "0.4.0" @@ -11257,7 +11305,9 @@ checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" dependencies = [ "deranged", "itoa", + "libc", "num-conv", + "num_threads", "powerfmt", "serde", "time-core", @@ -12343,6 +12393,21 @@ dependencies = [ "vergen-lib", ] +[[package]] +name = "vergen-git2" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4f6ee511ec45098eabade8a0750e76eec671e7fb2d9360c563911336bea9cac1" +dependencies = [ + "anyhow", + "derive_builder", + "git2", + "rustversion", + "time", + "vergen", + "vergen-lib", +] + [[package]] name = "vergen-lib" version = "0.1.6" diff --git a/cmd/ethrex/Cargo.toml b/cmd/ethrex/Cargo.toml index 9f72574aa5..47b71e4e3c 100644 --- a/cmd/ethrex/Cargo.toml +++ b/cmd/ethrex/Cargo.toml @@ -105,4 +105,5 @@ name = "build_block_benchmark" harness = false [build-dependencies] -vergen = { version = "9.0.0", features = ["rustc"] } +vergen-git2 = { version = "1.0.7", features = ["rustc"] } + diff --git a/cmd/ethrex/build.rs b/cmd/ethrex/build.rs index 827975d605..4b1a6b1742 100644 --- a/cmd/ethrex/build.rs +++ b/cmd/ethrex/build.rs @@ -1,14 +1,25 @@ use std::error::Error; -use vergen::{Emitter, RustcBuilder}; +use vergen_git2::{Emitter, Git2Builder, RustcBuilder}; // This build code is needed to add some env vars in order to construct the node client version -// VERGEN_RUSTC_COMMIT_HASH to get the commit hash // VERGEN_RUSTC_HOST_TRIPLE to get the build OS // VERGEN_RUSTC_SEMVER to get the rustc version +// VERGEN_GIT_BRANCH to get the git branch name +// VERGEN_GIT_SHA to get the git commit hash fn main() -> Result<(), Box> { - let rustc = RustcBuilder::all_rustc()?; + // Export build OS and rustc version as environment variables + let rustc = RustcBuilder::default() + .semver(true) + .host_triple(true) + .build()?; - Emitter::default().add_instructions(&rustc)?.emit()?; + // Export git commit hash and branch name as environment variables + let git2 = Git2Builder::default().branch(true).sha(true).build()?; + + Emitter::default() + .add_instructions(&rustc)? + .add_instructions(&git2)? + .emit()?; Ok(()) } diff --git a/cmd/ethrex/utils.rs b/cmd/ethrex/utils.rs index 2709a58e09..8dd20e1b10 100644 --- a/cmd/ethrex/utils.rs +++ b/cmd/ethrex/utils.rs @@ -153,10 +153,11 @@ pub fn parse_hex(s: &str) -> eyre::Result { pub fn get_client_version() -> String { format!( - "{}/v{}-develop-{}/{}/rustc-v{}", + "{}/v{}-{}-{}/{}/rustc-v{}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION"), - &env!("VERGEN_RUSTC_COMMIT_HASH")[0..6], + env!("VERGEN_GIT_BRANCH"), + env!("VERGEN_GIT_SHA"), env!("VERGEN_RUSTC_HOST_TRIPLE"), env!("VERGEN_RUSTC_SEMVER") ) diff --git a/crates/l2/Cargo.toml b/crates/l2/Cargo.toml index 84695eac08..77e9c9f243 100644 --- a/crates/l2/Cargo.toml +++ b/crates/l2/Cargo.toml @@ -49,6 +49,9 @@ zkvm_interface = { path = "./prover/zkvm/interface/" } [dev-dependencies] rand = "0.8.5" +[build-dependencies] +vergen-git2 = { version = "1.0.7"} + [lib] path = "./l2.rs" diff --git a/crates/l2/build.rs b/crates/l2/build.rs new file mode 100644 index 0000000000..49853122b5 --- /dev/null +++ b/crates/l2/build.rs @@ -0,0 +1,13 @@ +use std::error::Error; +use vergen_git2::{Emitter, Git2Builder}; + +// This build code is needed to add some env vars in order to construct the code version +// VERGEN_GIT_SHA to get the git commit hash + +fn main() -> Result<(), Box> { + // Export git commit hash and branch name as environment variables + let git2 = Git2Builder::default().sha(true).build()?; + + Emitter::default().add_instructions(&git2)?.emit()?; + Ok(()) +} diff --git a/crates/l2/prover/Cargo.toml b/crates/l2/prover/Cargo.toml index 4b002636d6..e4994ddfe9 100644 --- a/crates/l2/prover/Cargo.toml +++ b/crates/l2/prover/Cargo.toml @@ -2,6 +2,7 @@ name = "ethrex-prover" version.workspace = true edition.workspace = true +build = "build.rs" [dependencies] serde_json.workspace = true @@ -42,6 +43,9 @@ 
sp1-recursion-gnark-ffi = { version = "5.0.0", optional = true } [dev-dependencies] ethrex-storage.workspace = true +[build-dependencies] +vergen-git2 = { version = "1.0.7"} + [lib] name = "ethrex_prover_lib" path = "src/lib.rs" diff --git a/crates/l2/prover/build.rs b/crates/l2/prover/build.rs new file mode 100644 index 0000000000..49853122b5 --- /dev/null +++ b/crates/l2/prover/build.rs @@ -0,0 +1,13 @@ +use std::error::Error; +use vergen_git2::{Emitter, Git2Builder}; + +// This build code is needed to add some env vars in order to construct the code version +// VERGEN_GIT_SHA to get the git commit hash + +fn main() -> Result<(), Box> { + // Export git commit hash and branch name as environment variables + let git2 = Git2Builder::default().sha(true).build()?; + + Emitter::default().add_instructions(&git2)?.emit()?; + Ok(()) +} diff --git a/crates/l2/prover/src/prover.rs b/crates/l2/prover/src/prover.rs index 42dfc06bcf..ece35e46ed 100644 --- a/crates/l2/prover/src/prover.rs +++ b/crates/l2/prover/src/prover.rs @@ -1,5 +1,5 @@ use crate::{config::ProverConfig, prove, to_batch_proof}; -use ethrex_l2::sequencer::proof_coordinator::ProofData; +use ethrex_l2::sequencer::proof_coordinator::{ProofData, get_commit_hash}; use ethrex_l2_common::prover::BatchProof; use std::time::Duration; use tokio::{ @@ -24,6 +24,7 @@ struct Prover { prover_server_endpoint: String, proving_time_ms: u64, aligned_mode: bool, + commit_hash: String, } impl Prover { @@ -32,6 +33,7 @@ impl Prover { prover_server_endpoint: format!("{}:{}", cfg.http_addr, cfg.http_port), proving_time_ms: cfg.proving_time_ms, aligned_mode: cfg.aligned_mode, + commit_hash: get_commit_hash(), } } @@ -43,10 +45,15 @@ impl Prover { let Ok(prover_data) = self .request_new_input() .await - .inspect_err(|e| warn!("Failed to request new data: {e}")) + .inspect_err(|e| error!("Failed to request new data: {e}")) else { continue; }; + + let Some(prover_data) = prover_data else { + continue; + }; + // If we get the input // Generate the Proof let Ok(batch_proof) = prove(prover_data.input, self.aligned_mode) @@ -65,30 +72,36 @@ impl Prover { } } - async fn request_new_input(&self) -> Result { + async fn request_new_input(&self) -> Result, String> { // Request the input with the correct batch_number - let request = ProofData::batch_request(); + let request = ProofData::batch_request(self.commit_hash.clone()); let response = connect_to_prover_server_wr(&self.prover_server_endpoint, &request) .await .map_err(|e| format!("Failed to get Response: {e}"))?; - let ProofData::BatchResponse { - batch_number, - input, - } = response - else { - return Err("Expecting ProofData::Response".to_owned()); + let (batch_number, input) = match response { + ProofData::BatchResponse { + batch_number, + input, + } => (batch_number, input), + ProofData::InvalidCodeVersion { commit_hash } => { + return Err(format!( + "Invalid code version received. Server commit_hash: {}, Prover commit_hash: {}", + commit_hash, self.commit_hash + )); + } + _ => return Err("Expecting ProofData::Response".to_owned()), }; let (Some(batch_number), Some(input)) = (batch_number, input) else { - return Err( - "Received Empty Response, meaning that the ProverServer doesn't have batches to prove.\nThe Prover may be advancing faster than the Proposer." - .to_owned(), - ); + warn!( + "Received Empty Response, meaning that the ProverServer doesn't have batches to prove.\nThe Prover may be advancing faster than the Proposer." 
+ ); + return Ok(None); }; info!("Received Response for batch_number: {batch_number}"); - Ok(ProverData { + Ok(Some(ProverData { batch_number, input: ProgramInput { blocks: input.blocks, @@ -99,7 +112,7 @@ impl Prover { #[cfg(feature = "l2")] blob_proof: input.blob_proof, }, - }) + })) } async fn submit_proof(&self, batch_number: u64, batch_proof: BatchProof) -> Result<(), String> { diff --git a/crates/l2/sequencer/proof_coordinator.rs b/crates/l2/sequencer/proof_coordinator.rs index ae76840f24..1df1ef2c7d 100644 --- a/crates/l2/sequencer/proof_coordinator.rs +++ b/crates/l2/sequencer/proof_coordinator.rs @@ -62,9 +62,15 @@ pub enum ProofData { /// 3. /// The Client initiates the connection with a BatchRequest. /// Asking for the ProverInputData the prover_server considers/needs. - BatchRequest, + /// The commit hash is used to ensure the client and server are compatible. + BatchRequest { commit_hash: String }, /// 4. + /// The Server responds with an InvalidCodeVersion if the code version is not compatible. + /// The Client should then update its code to match the server's version. + InvalidCodeVersion { commit_hash: String }, + + /// 5. /// The Server responds with a BatchResponse containing the ProverInputData. /// If the BatchResponse is ProofData::BatchResponse{None, None}, /// the Client knows the BatchRequest couldn't be performed. @@ -73,20 +79,20 @@ pub enum ProofData { input: Option, }, - /// 5. + /// 6. /// The Client submits the zk Proof generated by the prover for the specified batch. ProofSubmit { batch_number: u64, batch_proof: BatchProof, }, - /// 6. + /// 7. /// The Server acknowledges the receipt of the proof and updates its state, ProofSubmitACK { batch_number: u64 }, } impl ProofData { - /// Builder function for creating a ProofSubmitAck + /// Builder function for creating a ProverSetup pub fn prover_setup(prover_type: ProverType, payload: Bytes) -> Self { ProofData::ProverSetup { prover_type, @@ -94,17 +100,22 @@ impl ProofData { } } - /// Builder function for creating a ProofSubmitAck + /// Builder function for creating a ProverSetupACK pub fn prover_setup_ack() -> Self { ProofData::ProverSetupACK } /// Builder function for creating a BatchRequest - pub fn batch_request() -> Self { - ProofData::BatchRequest + pub fn batch_request(commit_hash: String) -> Self { + ProofData::BatchRequest { commit_hash } } - /// Builder function for creating a BlockResponse + /// Builder function for creating a InvalidCodeVersion + pub fn invalid_code_version(commit_hash: String) -> Self { + ProofData::InvalidCodeVersion { commit_hash } + } + + /// Builder function for creating a BatchResponse pub fn batch_response(batch_number: u64, input: ProverInputData) -> Self { ProofData::BatchResponse { batch_number: Some(batch_number), @@ -133,6 +144,10 @@ impl ProofData { } } +pub fn get_commit_hash() -> String { + env!("VERGEN_GIT_SHA").to_string() +} + #[derive(Clone)] pub struct ProofCoordinatorState { listen_ip: IpAddr, @@ -147,6 +162,7 @@ pub struct ProofCoordinatorState { blockchain: Arc, validium: bool, needed_proof_types: Vec, + commit_hash: String, } impl ProofCoordinatorState { @@ -193,6 +209,7 @@ impl ProofCoordinatorState { blockchain, validium: config.validium, needed_proof_types, + commit_hash: get_commit_hash(), }) } } @@ -368,12 +385,12 @@ async fn handle_connection( Ok(mut stream) => { stream.read_to_end(&mut buffer).await?; - let data: Result = serde_json::from_slice(&buffer); - match data { - Ok(ProofData::BatchRequest) => { - if let Err(e) = handle_request(state, &mut 
stream).await { - error!("Failed to handle BatchRequest: {e}"); - } + + let data: Result = serde_json::from_slice(&buffer); + match data { + Ok(ProofData::BatchRequest { commit_hash }) => { + if let Err(e) = handle_request(state, &mut stream, commit_hash).await { + error!("Failed to handle BatchRequest: {e}"); } } Ok(ProofData::ProofSubmit { batch_number, @@ -409,9 +426,22 @@ async fn handle_connection( async fn handle_request( state: &ProofCoordinatorState, stream: &mut TcpStream, + commit_hash: String, ) -> Result<(), ProofCoordinatorError> { info!("BatchRequest received"); + if commit_hash != state.commit_hash { + error!( + "Code version mismatch: expected {}, got {}", + state.commit_hash, commit_hash + ); + + let response = ProofData::invalid_code_version(state.commit_hash.clone()); + send_response(stream, &response).await?; + info!("InvalidCodeVersion sent"); + return Ok(()); + } + let batch_to_verify = 1 + get_latest_sent_batch( state.needed_proof_types.clone(), &state.rollup_store, @@ -422,22 +452,18 @@ async fn handle_request( .map_err(|err| ProofCoordinatorError::InternalError(err.to_string()))?; let response = if !state.rollup_store.contains_batch(&batch_to_verify).await? { - let response = ProofData::empty_batch_response(); debug!("Sending empty BatchResponse"); - response + ProofData::empty_batch_response() } else { let input = create_prover_input(state, batch_to_verify).await?; - let response = ProofData::batch_response(batch_to_verify, input); debug!("Sending BatchResponse for block_number: {batch_to_verify}"); - response + ProofData::batch_response(batch_to_verify, input) }; - let buffer = serde_json::to_vec(&response)?; - stream - .write_all(&buffer) - .await - .map_err(ProofCoordinatorError::ConnectionError) - .map(|_| info!("BatchResponse sent for batch number: {batch_to_verify}")) + send_response(stream, &response).await?; + info!("BatchResponse sent for batch number: {batch_to_verify}"); + + Ok(()) } async fn handle_submit( @@ -468,15 +494,10 @@ async fn handle_submit( .store_proof_by_batch_and_type(batch_number, prover_type, batch_proof) .await?; } - let response = ProofData::proof_submit_ack(batch_number); - - let buffer = serde_json::to_vec(&response)?; - stream - .write_all(&buffer) - .await - .map_err(ProofCoordinatorError::ConnectionError) - .map(|_| info!("ProofSubmit ACK sent")) + send_response(stream, &response).await?; + info!("ProofSubmit ACK sent"); + Ok(()) } async fn handle_setup( @@ -512,12 +533,9 @@ async fn handle_setup( let response = ProofData::prover_setup_ack(); - let buffer = serde_json::to_vec(&response)?; - stream - .write_all(&buffer) - .await - .map_err(ProofCoordinatorError::ConnectionError) - .map(|_| info!("ProverSetupACK sent")) + send_response(stream, &response).await?; + info!("ProverSetupACK sent"); + Ok(()) } async fn create_prover_input( @@ -595,3 +613,15 @@ async fn fetch_blocks( } Ok(blocks) } + +async fn send_response( + stream: &mut TcpStream, + response: &ProofData, +) -> Result<(), ProofCoordinatorError> { + let buffer = serde_json::to_vec(response)?; + stream + .write_all(&buffer) + .await + .map_err(ProofCoordinatorError::ConnectionError)?; + Ok(()) +} diff --git a/crates/l2/tee/quote-gen/Cargo.lock b/crates/l2/tee/quote-gen/Cargo.lock index 26dbf77bd1..5532944d1c 100644 --- a/crates/l2/tee/quote-gen/Cargo.lock +++ b/crates/l2/tee/quote-gen/Cargo.lock @@ -1520,6 +1520,37 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "derive_builder" +version = "0.20.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947" +dependencies = [ + "derive_builder_macro", +] + +[[package]] +name = "derive_builder_core" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "derive_builder_macro" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" +dependencies = [ + "derive_builder_core", + "syn 2.0.101", +] + [[package]] name = "derive_more" version = "1.0.0" @@ -2248,6 +2279,7 @@ dependencies = [ "tokio", "tokio-util", "tracing", + "vergen-git2", "zkvm_interface", ] @@ -2815,6 +2847,19 @@ version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" +[[package]] +name = "git2" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2deb07a133b1520dc1a5690e9bd08950108873d7ed5de38dcc74d3b5ebffa110" +dependencies = [ + "bitflags 2.9.1", + "libc", + "libgit2-sys", + "log", + "url", +] + [[package]] name = "glob" version = "0.3.2" @@ -3681,6 +3726,18 @@ version = "0.2.174" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" +[[package]] +name = "libgit2-sys" +version = "0.18.2+1.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c42fe03df2bd3c53a3a9c7317ad91d80c81cd1fb0caec8d7cc4cd2bfa10c222" +dependencies = [ + "cc", + "libc", + "libz-sys", + "pkg-config", +] + [[package]] name = "libm" version = "0.2.15" @@ -3709,6 +3766,18 @@ dependencies = [ "escape8259", ] +[[package]] +name = "libz-sys" +version = "1.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b70e7a7df205e92a1a4cd9aaae7898dac0aa555503cc0a649494d0d60e7651d" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + [[package]] name = "linux-raw-sys" version = "0.9.4" @@ -3976,6 +4045,15 @@ dependencies = [ "syn 2.0.104", ] +[[package]] +name = "num_threads" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9" +dependencies = [ + "libc", +] + [[package]] name = "nybbles" version = "0.3.4" @@ -4645,6 +4723,7 @@ dependencies = [ "serde", "serde_json", "tokio", + "vergen-git2", "zerocopy", "zkvm_interface", ] @@ -6071,7 +6150,9 @@ checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" dependencies = [ "deranged", "itoa", + "libc", "num-conv", + "num_threads", "powerfmt", "serde", "time-core", @@ -6575,6 +6656,44 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" +[[package]] +name = "vergen" +version = "9.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b2bf58be11fc9414104c6d3a2e464163db5ef74b12296bda593cac37b6e4777" +dependencies = [ + "anyhow", + "derive_builder", + "rustversion", + "vergen-lib", +] + +[[package]] +name = "vergen-git2" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "4f6ee511ec45098eabade8a0750e76eec671e7fb2d9360c563911336bea9cac1" +dependencies = [ + "anyhow", + "derive_builder", + "git2", + "rustversion", + "time", + "vergen", + "vergen-lib", +] + +[[package]] +name = "vergen-lib" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b07e6010c0f3e59fcb164e0163834597da68d1f864e2b8ca49f74de01e9c166" +dependencies = [ + "anyhow", + "derive_builder", + "rustversion", +] + [[package]] name = "version_check" version = "0.9.5" diff --git a/crates/l2/tee/quote-gen/Cargo.toml b/crates/l2/tee/quote-gen/Cargo.toml index 2dbd558f2d..7927d66728 100644 --- a/crates/l2/tee/quote-gen/Cargo.toml +++ b/crates/l2/tee/quote-gen/Cargo.toml @@ -2,6 +2,7 @@ name = "quote-gen" version = "0.1.0" edition = "2024" +build = "build.rs" [dependencies] configfs-tsm = "0.0.1" @@ -31,6 +32,10 @@ secp256k1 = { version = "0.29.1", default-features = false, features = [ "std" ]} +[build-dependencies] +vergen-git2 = { version = "1.0.7"} + + [workspace] [features] diff --git a/crates/l2/tee/quote-gen/build.rs b/crates/l2/tee/quote-gen/build.rs new file mode 100644 index 0000000000..49853122b5 --- /dev/null +++ b/crates/l2/tee/quote-gen/build.rs @@ -0,0 +1,13 @@ +use std::error::Error; +use vergen_git2::{Emitter, Git2Builder}; + +// This build code is needed to add some env vars in order to construct the code version +// VERGEN_GIT_SHA to get the git commit hash + +fn main() -> Result<(), Box> { + // Export git commit hash and branch name as environment variables + let git2 = Git2Builder::default().sha(true).build()?; + + Emitter::default().add_instructions(&git2)?.emit()?; + Ok(()) +} diff --git a/crates/l2/tee/quote-gen/src/main.rs b/crates/l2/tee/quote-gen/src/main.rs index f6e6838f6a..6b967796b5 100644 --- a/crates/l2/tee/quote-gen/src/main.rs +++ b/crates/l2/tee/quote-gen/src/main.rs @@ -1,22 +1,20 @@ -use configfs_tsm::create_tdx_quote; - -use std::time::Duration; -use tokio::time::sleep; - -use ethrex_common::Bytes; -use ethrex_l2_sdk::calldata::encode_tuple; -use ethrex_l2_sdk::get_address_from_secret_key; -use zkvm_interface::io::ProgramInput; - -use keccak_hash::keccak; -use secp256k1::{generate_keypair, rand, Message, SecretKey}; mod sender; -use sender::{get_batch, submit_proof, submit_quote}; +use configfs_tsm::create_tdx_quote; +use ethrex_common::Bytes; +use ethrex_l2::sequencer::proof_coordinator::get_commit_hash; use ethrex_l2_common::{ calldata::Value, prover::{BatchProof, ProofCalldata, ProverType}, }; +use ethrex_l2_sdk::calldata::encode_tuple; +use ethrex_l2_sdk::get_address_from_secret_key; +use keccak_hash::keccak; +use secp256k1::{Message, SecretKey, generate_keypair, rand}; +use sender::{get_batch, submit_proof, submit_quote}; +use std::time::Duration; +use tokio::time::sleep; +use zkvm_interface::io::ProgramInput; const POLL_INTERVAL_MS: u64 = 5000; @@ -85,8 +83,8 @@ fn get_quote(private_key: &SecretKey) -> Result { .map(Bytes::from) } -async fn do_loop(private_key: &SecretKey) -> Result { - let (batch_number, input) = get_batch().await?; +async fn do_loop(private_key: &SecretKey, commit_hash: String) -> Result { + let (batch_number, input) = get_batch(commit_hash).await?; let output = calculate_transition(input)?; let signature = sign_eip191(&output, private_key); let calldata = ProofCalldata { @@ -108,13 +106,14 @@ async fn setup(private_key: &SecretKey) -> Result<(), String> { #[tokio::main] async fn main() { let (private_key, _) = generate_keypair(&mut rand::rngs::OsRng); + let commit_hash = 
get_commit_hash(); while let Err(err) = setup(&private_key).await { println!("Error sending quote: {}", err); sleep(Duration::from_millis(POLL_INTERVAL_MS)).await; } loop { sleep(Duration::from_millis(POLL_INTERVAL_MS)).await; - match do_loop(&private_key).await { + match do_loop(&private_key, commit_hash.clone()).await { Ok(batch_number) => println!("Processed batch {}", batch_number), Err(err) => println!("Error: {}", err), }; diff --git a/crates/l2/tee/quote-gen/src/sender.rs b/crates/l2/tee/quote-gen/src/sender.rs index fd808cb3f6..560edc92fe 100644 --- a/crates/l2/tee/quote-gen/src/sender.rs +++ b/crates/l2/tee/quote-gen/src/sender.rs @@ -12,10 +12,12 @@ use ethrex_common::Bytes; const SERVER_URL: &str = "172.17.0.1:3900"; const SERVER_URL_DEV: &str = "localhost:3900"; -pub async fn get_batch() -> Result<(u64, ProgramInput), String> { - let batch = connect_to_prover_server_wr(&ProofData::BatchRequest) - .await - .map_err(|e| format!("Failed to get Response: {e}"))?; +pub async fn get_batch(commit_hash: String) -> Result<(u64, ProgramInput), String> { + let batch = connect_to_prover_server_wr(&ProofData::BatchRequest { + commit_hash: commit_hash.clone(), + }) + .await + .map_err(|e| format!("Failed to get Response: {e}"))?; match batch { ProofData::BatchResponse { batch_number, @@ -35,6 +37,12 @@ pub async fn get_batch() -> Result<(u64, ProgramInput), String> { )), _ => Err("No blocks to prove.".to_owned()), }, + ProofData::InvalidCodeVersion { + commit_hash: server_code_version, + } => Err(format!( + "Invalid code version received. Server code: {}, Prover code: {}", + server_code_version, commit_hash + )), _ => Err("Expecting ProofData::Response".to_owned()), } } From cc27f5f40f7f64237b1f79be8b876189239ba315 Mon Sep 17 00:00:00 2001 From: Ivan Litteri <67517699+ilitteri@users.noreply.github.com> Date: Mon, 30 Jun 2025 18:11:30 -0300 Subject: [PATCH 30/40] refactor(l1, l2, levm): remove `l2` feature flag from crates `ethrex-vm` and `ethrex-levm` (#3367) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit **Motivation** My primary goal was to remove the `l2` feature flag from `cmd/ethrex` but to do this, we first need to remove it from: - `ethrex-vm`. - `ethrex-levm`. - `ethrex-blockchain`. **Description** This PR removes the feature flag `l2` from crates `ethrex-vm` and `ethrex-levm`. > *TL;DR:* > - In `ethrex-vm` the l2 precompiles logic was moved to a separate module, `l2_precompiles`. > - A new `VMType` enum was introduced in `ethrex-levm` as a field of `VM` (main LEVM's struct). It is used by LEVM to behave differently where needed (this is specifically, when executing precompiles, and when executing hooks). > - A new `BlockchainType` enum was introduced in `ethrex-blockchain` as a field of the struct `Blockchain` to differentiate when nodes are started as L1 or L2 nodes (this is later used in the code to instantiate the VM properly, matching the `BlockchainType` variants with `VMType` ones). The `l2` feature flag exists in `ethrex-vm` only because of `ethrex-levm`, so to remove it I needed to remove it from `ethrex-levm` first. 
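
As an aside, here is a minimal, self-contained sketch of the relationship described in the TL;DR above: the node-level `BlockchainType` is mapped to LEVM's `VMType` whenever a VM is instantiated. Only the enum variant names and the "L1 is the default" behaviour come from this patch; every other type and the helper `vm_type_for` are stubbed for illustration (the real dispatch lives in `Blockchain::new_evm` and `Evm::new_for_l1`/`Evm::new_for_l2`, shown in the diffs below).

```rust
// Sketch only: real types are stubbed; variant names and the L1 default
// mirror the enums introduced in this patch.

#[derive(Debug, Clone, Default)]
enum BlockchainType {
    #[default]
    L1,
    L2,
}

#[derive(Debug, Clone)]
enum VMType {
    L1,
    L2,
}

fn vm_type_for(blockchain_type: &BlockchainType) -> VMType {
    // A node started as an L2 node always instantiates the VM as VMType::L2,
    // which in turn selects the L2 hooks and precompiles inside LEVM.
    match blockchain_type {
        BlockchainType::L1 => VMType::L1,
        BlockchainType::L2 => VMType::L2,
    }
}

fn main() {
    let node_flavour = BlockchainType::default(); // L1 unless started as an L2 node
    println!("{:?}", vm_type_for(&node_flavour));
}
```
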
The following commits do that: - [Move l2 precompiles logic to new module](https://github.com/lambdaclass/ethrex/commit/28843a6b7b7bee0cacc95589e66190bdae510f94) - [Remove feature flag from hooks public API](https://github.com/lambdaclass/ethrex/commit/39a509fc7046dd2ffb34c405db89e1f38aead490) - [Use the correct functions](https://github.com/lambdaclass/ethrex/commit/3023b88d96455337f6b1fb6d34ea2c6d087b3518) - [Replace get_hooks](https://github.com/lambdaclass/ethrex/commit/88bc9a25691b06663600e4afe75f30332517f039) - [Remove l2 feature flag from levm](https://github.com/lambdaclass/ethrex/commit/8b098836b23fcdee1c85294d33090cd30f77c689) After that, it was almost safe to remove it from `ethrex-vm`: - [Remove l2 feature flag from vm crate](https://github.com/lambdaclass/ethrex/commit/fd971bec15d0934ccde5f6d25b16a4d16d0693df) This brought some compilation errors that were solved in: - [Implement BlockchainType and fix compilation](https://github.com/lambdaclass/ethrex/commit/32557eb7cabcefc935f2d525354ab981870af45f) **Next Steps** - Remove feature flag `l2` from `ethrex-blockchain` crate. - Remove feature flag `l2` from `cmd/ethrex`. --------- Co-authored-by: Jeremías Salomón <48994069+JereSalo@users.noreply.github.com> --- cmd/ef_tests/blockchain/test_runner.rs | 7 +- cmd/ef_tests/state/Cargo.toml | 1 - cmd/ef_tests/state/runner/levm_runner.rs | 3 +- cmd/ethrex/Cargo.toml | 1 - cmd/ethrex/bench/build_block_benchmark.rs | 8 +- cmd/ethrex/bench/import_blocks_benchmark.rs | 6 +- cmd/ethrex/cli.rs | 16 +- cmd/ethrex/ethrex.rs | 3 +- cmd/ethrex/initializers.rs | 10 +- cmd/ethrex/l2/command.rs | 4 +- cmd/ethrex_replay/Cargo.toml | 2 +- cmd/ethrex_replay/src/cli.rs | 5 +- cmd/ethrex_replay/src/run.rs | 21 ++- crates/blockchain/blockchain.rs | 31 +++- crates/blockchain/payload.rs | 22 ++- crates/blockchain/tracing.rs | 12 +- crates/l2/Cargo.toml | 4 +- crates/l2/based/block_fetcher.rs | 3 +- crates/l2/prover/Cargo.toml | 1 - crates/l2/prover/zkvm/interface/Cargo.toml | 2 +- .../l2/prover/zkvm/interface/risc0/Cargo.toml | 2 +- .../l2/prover/zkvm/interface/sp1/Cargo.toml | 2 +- .../l2/prover/zkvm/interface/src/execution.rs | 7 +- .../block_producer/payload_builder.rs | 9 +- crates/l2/sequencer/l1_committer.rs | 12 +- crates/l2/sequencer/mod.rs | 1 + crates/l2/tee/quote-gen/Cargo.toml | 11 +- crates/l2/utils/prover/save_state.rs | 4 +- crates/networking/rpc/eth/transaction.rs | 8 +- crates/vm/Cargo.toml | 1 - crates/vm/backends/levm/mod.rs | 128 ++++++++++------ crates/vm/backends/levm/tracing.rs | 15 +- crates/vm/backends/mod.rs | 72 +++++++-- crates/vm/backends/revm/mod.rs | 31 +--- crates/vm/backends/revm/tracing.rs | 18 +-- crates/vm/levm/Cargo.toml | 1 - .../bench/revm_comparison/src/levm_bench.rs | 8 +- crates/vm/levm/src/gas_cost.rs | 2 - crates/vm/levm/src/hooks/hook.rs | 31 ++-- crates/vm/levm/src/hooks/mod.rs | 2 - crates/vm/levm/src/l2_precompiles.rs | 142 +++++++++++++++++ crates/vm/levm/src/lib.rs | 1 + crates/vm/levm/src/precompiles.rs | 143 +----------------- crates/vm/levm/src/utils.rs | 11 +- crates/vm/levm/src/vm.rs | 28 +++- crates/vm/levm/tests/tests.rs | 13 +- crates/vm/tracing.rs | 13 +- 47 files changed, 522 insertions(+), 356 deletions(-) create mode 100644 crates/vm/levm/src/l2_precompiles.rs diff --git a/cmd/ef_tests/blockchain/test_runner.rs b/cmd/ef_tests/blockchain/test_runner.rs index bc17f813ce..39b610a0dd 100644 --- a/cmd/ef_tests/blockchain/test_runner.rs +++ b/cmd/ef_tests/blockchain/test_runner.rs @@ -5,7 +5,7 @@ use crate::{ types::{BlockChainExpectedException, 
BlockExpectedException, BlockWithRLP, TestUnit}, }; use ethrex_blockchain::{ - Blockchain, + Blockchain, BlockchainType, error::{ChainError, InvalidBlockError}, fork_choice::apply_fork_choice, }; @@ -69,7 +69,10 @@ pub async fn run_ef_test(test_key: &str, test: &TestUnit, evm: EvmEngine) -> Res // Check world_state check_prestate_against_db(test_key, test, &store); - let blockchain = Blockchain::new(evm, store.clone()); + // Blockchain EF tests are meant for L1. + let blockchain_type = BlockchainType::L1; + + let blockchain = Blockchain::new(evm, store.clone(), blockchain_type); // Execute all blocks in test for block_fixture in test.blocks.iter() { let expects_exception = block_fixture.expect_exception.is_some(); diff --git a/cmd/ef_tests/state/Cargo.toml b/cmd/ef_tests/state/Cargo.toml index 821c9b4c4e..6789b5f332 100644 --- a/cmd/ef_tests/state/Cargo.toml +++ b/cmd/ef_tests/state/Cargo.toml @@ -42,7 +42,6 @@ path = "./lib.rs" default = ["c-kzg", "blst"] c-kzg = ["ethrex-vm/c-kzg", "ethrex-levm/c-kzg", "ethrex-common/c-kzg"] blst = ["ethrex-vm/blst"] -l2 = ["ethrex-vm/l2", "ethrex-levm/l2"] [[test]] name = "all" diff --git a/cmd/ef_tests/state/runner/levm_runner.rs b/cmd/ef_tests/state/runner/levm_runner.rs index b4d610033c..6b7ad34a39 100644 --- a/cmd/ef_tests/state/runner/levm_runner.rs +++ b/cmd/ef_tests/state/runner/levm_runner.rs @@ -16,7 +16,7 @@ use ethrex_levm::{ db::gen_db::GeneralizedDatabase, errors::{ExecutionReport, TxValidationError, VMError}, tracing::LevmCallTracer, - vm::VM, + vm::{VM, VMType}, }; use ethrex_rlp::encode::RLPEncode; use ethrex_vm::backends; @@ -201,6 +201,7 @@ pub fn prepare_vm_for_tx<'a>( db, &tx, LevmCallTracer::disabled(), + VMType::L1, // TODO: Should we run the EF tests with L2? )) } diff --git a/cmd/ethrex/Cargo.toml b/cmd/ethrex/Cargo.toml index 47b71e4e3c..4deea41826 100644 --- a/cmd/ethrex/Cargo.toml +++ b/cmd/ethrex/Cargo.toml @@ -75,7 +75,6 @@ l2 = [ "dep:ethrex-l2-common", "dep:ethrex-sdk", "dep:ethrex-l2-rpc", - "ethrex-vm/l2", "ethrex-blockchain/l2", "ethrex-storage-rollup", ] diff --git a/cmd/ethrex/bench/build_block_benchmark.rs b/cmd/ethrex/bench/build_block_benchmark.rs index 8d71a9e9a2..9f245bedce 100644 --- a/cmd/ethrex/bench/build_block_benchmark.rs +++ b/cmd/ethrex/bench/build_block_benchmark.rs @@ -10,7 +10,7 @@ use criterion::{ measurement::{Measurement, ValueFormatter}, }; use ethrex_blockchain::{ - Blockchain, + Blockchain, BlockchainType, payload::{BuildPayloadArgs, PayloadBuildResult, create_payload}, }; use ethrex_common::{ @@ -261,7 +261,11 @@ pub fn build_block_benchmark(c: &mut Criterion) { .collect(); let (store_with_genesis, genesis) = setup_genesis(&addresses).await; - let block_chain = Blockchain::new(EvmEngine::LEVM, store_with_genesis.clone()); + let block_chain = Blockchain::new( + EvmEngine::LEVM, + store_with_genesis.clone(), + BlockchainType::L1, // TODO: Should we support L2? 
+ ); fill_mempool(&block_chain, accounts).await; (block_chain, genesis.get_block(), store_with_genesis) diff --git a/cmd/ethrex/bench/import_blocks_benchmark.rs b/cmd/ethrex/bench/import_blocks_benchmark.rs index c579e64eca..835de5845f 100644 --- a/cmd/ethrex/bench/import_blocks_benchmark.rs +++ b/cmd/ethrex/bench/import_blocks_benchmark.rs @@ -5,6 +5,7 @@ use ethrex::{ networks::Network, utils::set_datadir, }; +use ethrex_blockchain::BlockchainType; use ethrex_vm::EvmEngine; #[inline] @@ -15,7 +16,9 @@ fn block_import() { let evm_engine = EvmEngine::default(); - let network = Network::from("../../fixtures/genesis/perf-ci.json"); + let blockchain_type = BlockchainType::default(); // TODO: Should we support L2? + + let network = Network::from("../../test_data/genesis-perf-ci.json"); let genesis = network .get_genesis() .expect("Failed to generate genesis from file"); @@ -25,6 +28,7 @@ fn block_import() { data_dir, genesis, evm_engine, + blockchain_type, )) .expect("Failed to import blocks on the Tokio runtime"); } diff --git a/cmd/ethrex/cli.rs b/cmd/ethrex/cli.rs index 566e50016c..a768f860e0 100644 --- a/cmd/ethrex/cli.rs +++ b/cmd/ethrex/cli.rs @@ -6,7 +6,7 @@ use std::{ }; use clap::{ArgAction, Parser as ClapParser, Subcommand as ClapSubcommand}; -use ethrex_blockchain::error::ChainError; +use ethrex_blockchain::{BlockchainType, error::ChainError}; use ethrex_common::types::Genesis; use ethrex_p2p::{sync::SyncMode, types::Node}; use ethrex_rlp::encode::RLPEncode; @@ -241,6 +241,8 @@ pub enum Subcommand { path: String, #[arg(long = "removedb", action = ArgAction::SetTrue)] removedb: bool, + #[arg(long, action = ArgAction::SetTrue)] + l2: bool, }, #[command( name = "export", @@ -290,7 +292,7 @@ impl Subcommand { Subcommand::RemoveDB { datadir, force } => { remove_db(&datadir, force); } - Subcommand::Import { path, removedb } => { + Subcommand::Import { path, removedb, l2 } => { if removedb { Box::pin(async { Self::RemoveDB { @@ -305,7 +307,12 @@ impl Subcommand { let network = &opts.network; let genesis = network.get_genesis()?; - import_blocks(&path, &opts.datadir, genesis, opts.evm).await?; + let blockchain_type = if l2 { + BlockchainType::L2 + } else { + BlockchainType::L1 + }; + import_blocks(&path, &opts.datadir, genesis, opts.evm, blockchain_type).await?; } Subcommand::Export { path, first, last } => { export_blocks(&path, &opts.datadir, first, last).await @@ -354,10 +361,11 @@ pub async fn import_blocks( data_dir: &str, genesis: Genesis, evm: EvmEngine, + blockchain_type: BlockchainType, ) -> Result<(), ChainError> { let data_dir = set_datadir(data_dir); let store = init_store(&data_dir, genesis).await; - let blockchain = init_blockchain(evm, store.clone()); + let blockchain = init_blockchain(evm, store.clone(), blockchain_type); let path_metadata = metadata(path).expect("Failed to read path"); let blocks = if path_metadata.is_dir() { let mut blocks = vec![]; diff --git a/cmd/ethrex/ethrex.rs b/cmd/ethrex/ethrex.rs index cd283663da..a7ad4e0bda 100644 --- a/cmd/ethrex/ethrex.rs +++ b/cmd/ethrex/ethrex.rs @@ -7,6 +7,7 @@ use ethrex::{ }, utils::{NodeConfigFile, set_datadir, store_node_config_file}, }; +use ethrex_blockchain::BlockchainType; use ethrex_p2p::{kademlia::KademliaTable, network::peer_table, types::NodeRecord}; #[cfg(feature = "sync-test")] use ethrex_storage::Store; @@ -78,7 +79,7 @@ async fn main() -> eyre::Result<()> { #[cfg(feature = "sync-test")] set_sync_block(&store).await; - let blockchain = init_blockchain(opts.evm, store.clone()); + let blockchain = 
init_blockchain(opts.evm, store.clone(), BlockchainType::L1); let signer = get_signer(&data_dir); diff --git a/cmd/ethrex/initializers.rs b/cmd/ethrex/initializers.rs index 1de04ae67b..0430656c87 100644 --- a/cmd/ethrex/initializers.rs +++ b/cmd/ethrex/initializers.rs @@ -3,7 +3,7 @@ use crate::{ networks::{self, Network, PublicNetwork}, utils::{get_client_version, parse_socket_addr, read_jwtsecret_file, read_node_config_file}, }; -use ethrex_blockchain::Blockchain; +use ethrex_blockchain::{Blockchain, BlockchainType}; use ethrex_common::types::Genesis; use ethrex_p2p::{ kademlia::KademliaTable, @@ -82,9 +82,13 @@ pub fn open_store(data_dir: &str) -> Store { } } -pub fn init_blockchain(evm_engine: EvmEngine, store: Store) -> Arc { +pub fn init_blockchain( + evm_engine: EvmEngine, + store: Store, + blockchain_type: BlockchainType, +) -> Arc { info!("Initiating blockchain with EVM: {}", evm_engine); - Blockchain::new(evm_engine, store).into() + Blockchain::new(evm_engine, store, blockchain_type).into() } #[allow(clippy::too_many_arguments)] diff --git a/cmd/ethrex/l2/command.rs b/cmd/ethrex/l2/command.rs index 2a29d9ff20..64d8ad86ad 100644 --- a/cmd/ethrex/l2/command.rs +++ b/cmd/ethrex/l2/command.rs @@ -10,6 +10,7 @@ use crate::{ utils::{NodeConfigFile, parse_private_key, set_datadir, store_node_config_file}, }; use clap::Subcommand; +use ethrex_blockchain::BlockchainType; use ethrex_common::{ Address, U256, types::{BYTES_PER_BLOB, BlobsBundle, BlockHeader, batch::Batch, bytes_from_blob}, @@ -136,7 +137,8 @@ impl Command { let store = init_store(&data_dir, genesis).await; let rollup_store = l2::initializers::init_rollup_store(&rollup_store_dir).await; - let blockchain = init_blockchain(opts.node_opts.evm, store.clone()); + let blockchain = + init_blockchain(opts.node_opts.evm, store.clone(), BlockchainType::L2); let signer = get_signer(&data_dir); diff --git a/cmd/ethrex_replay/Cargo.toml b/cmd/ethrex_replay/Cargo.toml index b8664c55fa..aeae120ebe 100644 --- a/cmd/ethrex_replay/Cargo.toml +++ b/cmd/ethrex_replay/Cargo.toml @@ -58,5 +58,5 @@ charming = { version = "0.4.0", features = ["ssr"] } risc0 = ["zkvm_interface/risc0", "ethrex-prover/risc0"] sp1 = ["zkvm_interface/sp1", "ethrex-prover/sp1"] gpu = ["ethrex-prover/gpu"] -l2 = ["ethrex-vm/l2", "zkvm_interface/l2", "ethrex-l2/l2"] +l2 = ["zkvm_interface/l2", "ethrex-l2/l2"] ci = [] diff --git a/cmd/ethrex_replay/src/cli.rs b/cmd/ethrex_replay/src/cli.rs index 5713eaca4d..41f34b1e92 100644 --- a/cmd/ethrex_replay/src/cli.rs +++ b/cmd/ethrex_replay/src/cli.rs @@ -70,6 +70,8 @@ enum SubcommandExecute { help = "Name or ChainID of the network to use" )] network: String, + #[arg(long, required = false)] + l2: bool, }, } @@ -117,11 +119,12 @@ impl SubcommandExecute { tx, rpc_url, network, + l2, } => { let chain_config = get_chain_config(&network)?; let block_number = get_tx_block(&tx, &rpc_url).await?; let cache = get_blockdata(&rpc_url, chain_config, block_number).await?; - let (receipt, transitions) = run_tx(cache, &tx).await?; + let (receipt, transitions) = run_tx(cache, &tx, l2).await?; print_receipt(receipt); for transition in transitions { print_transition(transition); diff --git a/cmd/ethrex_replay/src/run.rs b/cmd/ethrex_replay/src/run.rs index b59f1bd181..a6415ed9d1 100644 --- a/cmd/ethrex_replay/src/run.rs +++ b/cmd/ethrex_replay/src/run.rs @@ -1,6 +1,9 @@ use crate::cache::Cache; use ethrex_common::types::{AccountUpdate, ELASTICITY_MULTIPLIER, Receipt}; -use ethrex_levm::db::{CacheDB, gen_db::GeneralizedDatabase}; +use ethrex_levm::{ 
+ db::{CacheDB, gen_db::GeneralizedDatabase}, + vm::VMType, +}; use ethrex_vm::{DynVmDatabase, Evm, EvmEngine, backends::levm::LEVM}; use eyre::Ok; use std::sync::Arc; @@ -58,7 +61,11 @@ pub async fn prove(cache: Cache) -> eyre::Result { Ok(serde_json::to_string(&out.0)?) } -pub async fn run_tx(cache: Cache, tx_id: &str) -> eyre::Result<(Receipt, Vec)> { +pub async fn run_tx( + cache: Cache, + tx_id: &str, + l2: bool, +) -> eyre::Result<(Receipt, Vec)> { let block = cache .blocks .first() @@ -67,16 +74,22 @@ pub async fn run_tx(cache: Cache, tx_id: &str) -> eyre::Result<(Receipt, Vec = Arc::new(Box::new(prover_db.clone())); let mut db = GeneralizedDatabase::new(store.clone(), CacheDB::new()); - LEVM::prepare_block(block, &mut db)?; + LEVM::prepare_block(block, &mut db, vm_type)?; LEVM::get_state_transitions(&mut db)? }; prover_db.apply_account_updates(&changes)?; for (tx, tx_sender) in block.body.get_transactions_with_sender()? { - let mut vm = Evm::new(EvmEngine::LEVM, prover_db.clone()); + let mut vm = if l2 { + Evm::new_for_l2(EvmEngine::LEVM, prover_db.clone())? + } else { + Evm::new_for_l1(EvmEngine::LEVM, prover_db.clone()) + }; let (receipt, _) = vm.execute_tx(tx, &block.header, &mut remaining_gas, tx_sender)?; let account_updates = vm.get_state_transitions()?; prover_db.apply_account_updates(&account_updates)?; diff --git a/crates/blockchain/blockchain.rs b/crates/blockchain/blockchain.rs index 02366b06f3..11027f5808 100644 --- a/crates/blockchain/blockchain.rs +++ b/crates/blockchain/blockchain.rs @@ -26,7 +26,7 @@ use ethrex_common::{Address, H256, TrieLogger}; use ethrex_metrics::metrics; use ethrex_storage::{Store, UpdateBatch, error::StoreError, hash_address, hash_key}; use ethrex_vm::backends::levm::db::DatabaseLogger; -use ethrex_vm::{BlockExecutionResult, DynVmDatabase, Evm, EvmEngine}; +use ethrex_vm::{BlockExecutionResult, DynVmDatabase, Evm, EvmEngine, EvmError}; use mempool::Mempool; use std::collections::HashMap; use std::sync::atomic::{AtomicBool, Ordering}; @@ -45,6 +45,13 @@ use ethrex_common::types::BlobsBundle; //TODO: Implement a struct Chain or BlockChain to encapsulate //functionality and canonical chain state and config +#[derive(Debug, Clone, Default)] +pub enum BlockchainType { + #[default] + L1, + L2, +} + #[derive(Debug)] pub struct Blockchain { pub evm_engine: EvmEngine, @@ -54,6 +61,7 @@ pub struct Blockchain { /// This will be set to true once the initial sync has taken place and wont be set to false after /// This does not reflect whether there is an ongoing sync process is_synced: AtomicBool, + pub r#type: BlockchainType, } #[derive(Debug, Clone)] @@ -62,12 +70,13 @@ pub struct BatchBlockProcessingFailure { pub failed_block_hash: H256, } impl Blockchain { - pub fn new(evm_engine: EvmEngine, store: Store) -> Self { + pub fn new(evm_engine: EvmEngine, store: Store, blockchain_type: BlockchainType) -> Self { Self { evm_engine, storage: store, mempool: Mempool::new(), is_synced: AtomicBool::new(false), + r#type: blockchain_type, } } @@ -77,6 +86,7 @@ impl Blockchain { storage: store, mempool: Mempool::new(), is_synced: AtomicBool::new(false), + r#type: BlockchainType::default(), } } @@ -98,7 +108,7 @@ impl Blockchain { validate_block(block, &parent_header, &chain_config, ELASTICITY_MULTIPLIER)?; let vm_db = StoreVmDatabase::new(self.storage.clone(), block.header.parent_hash); - let mut vm = Evm::new(self.evm_engine, vm_db); + let mut vm = self.new_evm(vm_db)?; let execution_result = vm.execute_block(block)?; let account_updates = 
vm.get_state_transitions()?; @@ -170,7 +180,10 @@ impl Blockchain { let vm_db: DynVmDatabase = Box::new(StoreVmDatabase::new(self.storage.clone(), parent_hash)); let logger = Arc::new(DatabaseLogger::new(Arc::new(Mutex::new(Box::new(vm_db))))); - let mut vm = Evm::new_from_db(logger.clone()); + let mut vm = match self.r#type { + BlockchainType::L1 => Evm::new_from_db_for_l1(logger.clone()), + BlockchainType::L2 => Evm::new_from_db_for_l2(logger.clone()), + }; // Re-execute block with logger vm.execute_block(block)?; @@ -442,7 +455,7 @@ impl Blockchain { first_block_header.parent_hash, block_hash_cache, ); - let mut vm = Evm::new(self.evm_engine, vm_db); + let mut vm = self.new_evm(vm_db).map_err(|e| (e.into(), None))?; let blocks_len = blocks.len(); let mut all_receipts: Vec<(BlockHash, Vec)> = Vec::with_capacity(blocks_len); @@ -776,6 +789,14 @@ impl Blockchain { Ok(result) } + + pub fn new_evm(&self, vm_db: StoreVmDatabase) -> Result { + let evm = match self.r#type { + BlockchainType::L1 => Evm::new_for_l1(self.evm_engine, vm_db), + BlockchainType::L2 => Evm::new_for_l2(self.evm_engine, vm_db)?, + }; + Ok(evm) + } } pub fn validate_requests_hash( diff --git a/crates/blockchain/payload.rs b/crates/blockchain/payload.rs index 66924038ee..94423892a6 100644 --- a/crates/blockchain/payload.rs +++ b/crates/blockchain/payload.rs @@ -30,7 +30,7 @@ use ethrex_metrics::metrics; use ethrex_metrics::metrics_transactions::{METRICS_TX, MetricsTxType}; use crate::{ - Blockchain, + Blockchain, BlockchainType, constants::{GAS_LIMIT_BOUND_DIVISOR, MIN_GAS_LIMIT, TX_GAS_COST}, error::{ChainError, InvalidBlockError}, mempool::PendingTxFilter, @@ -184,7 +184,12 @@ pub struct PayloadBuildContext { } impl PayloadBuildContext { - pub fn new(payload: Block, evm_engine: EvmEngine, storage: &Store) -> Result { + pub fn new( + payload: Block, + evm_engine: EvmEngine, + storage: &Store, + blockchain_type: BlockchainType, + ) -> Result { let config = storage .get_chain_config() .map_err(|e| EvmError::DB(e.to_string()))?; @@ -197,7 +202,10 @@ impl PayloadBuildContext { ); let vm_db = StoreVmDatabase::new(storage.clone(), payload.header.parent_hash); - let vm = Evm::new(evm_engine, vm_db); + let vm = match blockchain_type { + BlockchainType::L1 => Evm::new_for_l1(evm_engine, vm_db), + BlockchainType::L2 => Evm::new_for_l2(evm_engine, vm_db)?, + }; Ok(PayloadBuildContext { remaining_gas: payload.header.gas_limit, @@ -276,10 +284,12 @@ impl Blockchain { debug!("Building payload"); let base_fee = payload.header.base_fee_per_gas.unwrap_or_default(); - let mut context = PayloadBuildContext::new(payload, self.evm_engine, &self.storage)?; + let mut context = + PayloadBuildContext::new(payload, self.evm_engine, &self.storage, self.r#type.clone())?; - #[cfg(not(feature = "l2"))] - self.apply_system_operations(&mut context)?; + if let BlockchainType::L1 = self.r#type { + self.apply_system_operations(&mut context)?; + } self.apply_withdrawals(&mut context)?; self.fill_transactions(&mut context)?; self.extract_requests(&mut context)?; diff --git a/crates/blockchain/tracing.rs b/crates/blockchain/tracing.rs index ff89d962d9..06c1d78027 100644 --- a/crates/blockchain/tracing.rs +++ b/crates/blockchain/tracing.rs @@ -102,14 +102,12 @@ impl Blockchain { .iter() .map(|b| (b.header.number, b.hash())) .collect(); - let mut vm = Evm::new( - self.evm_engine, - StoreVmDatabase::new_with_block_hash_cache( - self.storage.clone(), - parent_hash, - block_hash_cache, - ), + let vm_db = StoreVmDatabase::new_with_block_hash_cache( + 
self.storage.clone(), + parent_hash, + block_hash_cache, ); + let mut vm = self.new_evm(vm_db)?; // Run parents to rebuild pre-state for block in blocks_to_re_execute.iter().rev() { vm.rerun_block(block, None)?; diff --git a/crates/l2/Cargo.toml b/crates/l2/Cargo.toml index 77e9c9f243..ac7b56611d 100644 --- a/crates/l2/Cargo.toml +++ b/crates/l2/Cargo.toml @@ -40,7 +40,7 @@ directories = "5.0.1" bincode = "1.3.3" serde_with = "3.11.0" lazy_static.workspace = true -aligned-sdk = { git = "https://github.com/yetanotherco/aligned_layer", tag="v0.16.1" } +aligned-sdk = { git = "https://github.com/yetanotherco/aligned_layer", tag = "v0.16.1" } ethers = "2.0" cfg-if.workspace = true @@ -66,4 +66,4 @@ panic = "deny" [features] default = ["l2"] metrics = ["ethrex-blockchain/metrics", "ethrex-metrics/l2"] -l2 = ["ethrex-blockchain/l2", "ethrex-vm/l2", "zkvm_interface/l2"] +l2 = ["ethrex-blockchain/l2", "zkvm_interface/l2"] diff --git a/crates/l2/based/block_fetcher.rs b/crates/l2/based/block_fetcher.rs index 99681d874a..102672463b 100644 --- a/crates/l2/based/block_fetcher.rs +++ b/crates/l2/based/block_fetcher.rs @@ -16,7 +16,6 @@ use ethrex_rlp::decode::RLPDecode; use ethrex_rpc::{EthClient, types::receipt::RpcLog}; use ethrex_storage::Store; use ethrex_storage_rollup::{RollupStoreError, StoreRollup}; -use ethrex_vm::{Evm, EvmEngine}; use keccak_hash::keccak; use spawned_concurrency::{ error::GenServerError, @@ -496,7 +495,7 @@ async fn get_batch( let mut acc_account_updates: HashMap = HashMap::new(); for block in batch { let vm_db = StoreVmDatabase::new(state.store.clone(), block.header.parent_hash); - let mut vm = Evm::new(EvmEngine::default(), vm_db); + let mut vm = state.blockchain.new_evm(vm_db)?; vm.execute_block(block) .map_err(BlockFetcherError::EvmError)?; let account_updates = vm diff --git a/crates/l2/prover/Cargo.toml b/crates/l2/prover/Cargo.toml index e4994ddfe9..0510060a77 100644 --- a/crates/l2/prover/Cargo.toml +++ b/crates/l2/prover/Cargo.toml @@ -67,7 +67,6 @@ sp1 = ["zkvm_interface/sp1", "dep:sp1-sdk"] gpu = ["risc0-zkvm?/cuda", "sp1-sdk?/cuda"] l2 = [ - "ethrex-vm/l2", "zkvm_interface/l2", "ethrex-blockchain/l2", "ethrex-l2/l2", diff --git a/crates/l2/prover/zkvm/interface/Cargo.toml b/crates/l2/prover/zkvm/interface/Cargo.toml index 7419050182..5efb26df81 100644 --- a/crates/l2/prover/zkvm/interface/Cargo.toml +++ b/crates/l2/prover/zkvm/interface/Cargo.toml @@ -34,7 +34,7 @@ methods = ["risc0"] default = [] risc0 = ["dep:risc0-build"] sp1 = ["dep:sp1-build", "dep:sp1-sdk"] -l2 = ["ethrex-vm/l2"] +l2 = [] [lib] path = "./src/lib.rs" diff --git a/crates/l2/prover/zkvm/interface/risc0/Cargo.toml b/crates/l2/prover/zkvm/interface/risc0/Cargo.toml index 925a4f90e5..98cf13639b 100644 --- a/crates/l2/prover/zkvm/interface/risc0/Cargo.toml +++ b/crates/l2/prover/zkvm/interface/risc0/Cargo.toml @@ -27,4 +27,4 @@ secp256k1 = { git = "https://github.com/sp1-patches/rust-secp256k1", branch = "p ecdsa-core = { git = "https://github.com/sp1-patches/signatures", package = "ecdsa", branch = "patch-ecdsa-v0.16.9" } [features] -l2 = ["ethrex-vm/l2", "zkvm_interface/l2", "ethrex-blockchain/l2"] +l2 = ["zkvm_interface/l2", "ethrex-blockchain/l2"] diff --git a/crates/l2/prover/zkvm/interface/sp1/Cargo.toml b/crates/l2/prover/zkvm/interface/sp1/Cargo.toml index 65d9a44ea6..96e9fe0518 100644 --- a/crates/l2/prover/zkvm/interface/sp1/Cargo.toml +++ b/crates/l2/prover/zkvm/interface/sp1/Cargo.toml @@ -36,4 +36,4 @@ rsa = { git = "https://github.com/sp1-patches/RustCrypto-RSA", tag = "patch-0.9. 
# ecdsa = { git = "https://github.com/sp1-patches/signatures", tag = "patch-16.9-sp1-4.1.0" } [features] -l2 = ["ethrex-vm/l2", "zkvm_interface/l2", "ethrex-blockchain/l2"] +l2 = ["zkvm_interface/l2", "ethrex-blockchain/l2"] diff --git a/crates/l2/prover/zkvm/interface/src/execution.rs b/crates/l2/prover/zkvm/interface/src/execution.rs index 00480acd5e..4672c51672 100644 --- a/crates/l2/prover/zkvm/interface/src/execution.rs +++ b/crates/l2/prover/zkvm/interface/src/execution.rs @@ -43,7 +43,7 @@ pub enum StatelessExecutionError { #[error("Receipts validation error: {0}")] ReceiptsRootValidationError(ChainError), #[error("EVM error: {0}")] - EvmError(EvmError), + EvmError(#[from] EvmError), #[cfg(feature = "l2")] #[error("L1Message calculation error: {0}")] L1MessageError(#[from] L1MessagingError), @@ -265,7 +265,10 @@ fn execute_stateless( .map_err(StatelessExecutionError::BlockValidationError)?; // Execute block - let mut vm = Evm::new(EvmEngine::LEVM, db.clone()); + #[cfg(feature = "l2")] + let mut vm = Evm::new_for_l2(EvmEngine::LEVM, db.clone())?; + #[cfg(not(feature = "l2"))] + let mut vm = Evm::new_for_l1(EvmEngine::LEVM, db.clone()); let result = vm .execute_block(block) .map_err(StatelessExecutionError::EvmError)?; diff --git a/crates/l2/sequencer/block_producer/payload_builder.rs b/crates/l2/sequencer/block_producer/payload_builder.rs index a68c55d65c..a688eaa63d 100644 --- a/crates/l2/sequencer/block_producer/payload_builder.rs +++ b/crates/l2/sequencer/block_producer/payload_builder.rs @@ -42,7 +42,12 @@ pub async fn build_payload( let gas_limit = payload.header.gas_limit; debug!("Building payload"); - let mut context = PayloadBuildContext::new(payload, blockchain.evm_engine, store)?; + let mut context = PayloadBuildContext::new( + payload, + blockchain.evm_engine, + store, + blockchain.r#type.clone(), + )?; fill_transactions(blockchain.clone(), &mut context, store).await?; blockchain.finalize_payload(&mut context).await?; @@ -252,7 +257,7 @@ fn get_account_diffs_in_tx( "REVM not supported for L2".to_string(), ))); } - Evm::LEVM { db } => { + Evm::LEVM { db, .. 
} => { let transaction_backup = db.get_tx_backup().map_err(|e| { BlockProducerError::FailedToGetDataFrom(format!("TransactionBackup: {e}")) })?; diff --git a/crates/l2/sequencer/l1_committer.rs b/crates/l2/sequencer/l1_committer.rs index 18bee62cbc..a9bff70b4d 100644 --- a/crates/l2/sequencer/l1_committer.rs +++ b/crates/l2/sequencer/l1_committer.rs @@ -5,7 +5,7 @@ use crate::{ }; use bytes::Bytes; -use ethrex_blockchain::vm::StoreVmDatabase; +use ethrex_blockchain::{Blockchain, vm::StoreVmDatabase}; use ethrex_common::{ Address, H256, U256, types::{ @@ -29,9 +29,8 @@ use ethrex_rpc::clients::eth::{ }; use ethrex_storage::Store; use ethrex_storage_rollup::StoreRollup; -use ethrex_vm::{Evm, EvmEngine}; use secp256k1::SecretKey; -use std::collections::HashMap; +use std::{collections::HashMap, sync::Arc}; use tracing::{debug, error, info, warn}; use super::{errors::BlobEstimationError, utils::random_duration}; @@ -47,6 +46,7 @@ const COMMIT_FUNCTION_SIGNATURE: &str = "commitBatch(uint256,bytes32,bytes32,byt #[derive(Clone)] pub struct CommitterState { eth_client: EthClient, + blockchain: Arc, on_chain_proposer_address: Address, store: Store, rollup_store: StoreRollup, @@ -64,6 +64,7 @@ impl CommitterState { pub fn new( committer_config: &CommitterConfig, eth_config: &EthConfig, + blockchain: Arc, store: Store, rollup_store: StoreRollup, based: bool, @@ -79,6 +80,7 @@ impl CommitterState { Some(eth_config.maximum_allowed_max_fee_per_gas), Some(eth_config.maximum_allowed_max_fee_per_blob_gas), )?, + blockchain, on_chain_proposer_address: committer_config.on_chain_proposer_address, store, rollup_store, @@ -110,6 +112,7 @@ pub struct L1Committer; impl L1Committer { pub async fn spawn( store: Store, + blockchain: Arc, rollup_store: StoreRollup, cfg: SequencerConfig, sequencer_state: SequencerState, @@ -117,6 +120,7 @@ impl L1Committer { let state = CommitterState::new( &cfg.l1_committer, &cfg.eth, + blockchain, store.clone(), rollup_store.clone(), cfg.based.based, @@ -341,7 +345,7 @@ async fn prepare_batch_from_block( let vm_db = StoreVmDatabase::new(state.store.clone(), block_to_commit.header.parent_hash); - let mut vm = Evm::new(EvmEngine::default(), vm_db); + let mut vm = state.blockchain.new_evm(vm_db)?; vm.execute_block(&block_to_commit)?; vm.get_state_transitions()? 
}; diff --git a/crates/l2/sequencer/mod.rs b/crates/l2/sequencer/mod.rs index 5baf122686..7b5ba6d496 100644 --- a/crates/l2/sequencer/mod.rs +++ b/crates/l2/sequencer/mod.rs @@ -78,6 +78,7 @@ pub async fn start_l2( }); let _ = L1Committer::spawn( store.clone(), + blockchain.clone(), rollup_store.clone(), cfg.clone(), shared_state.clone(), diff --git a/crates/l2/tee/quote-gen/Cargo.toml b/crates/l2/tee/quote-gen/Cargo.toml index 7927d66728..7caa44d04e 100644 --- a/crates/l2/tee/quote-gen/Cargo.toml +++ b/crates/l2/tee/quote-gen/Cargo.toml @@ -29,8 +29,8 @@ secp256k1 = { version = "0.29.1", default-features = false, features = [ "global-context", "recovery", "rand", - "std" -]} + "std", +] } [build-dependencies] vergen-git2 = { version = "1.0.7"} @@ -40,9 +40,4 @@ vergen-git2 = { version = "1.0.7"} [features] default = ["l2"] -l2 = [ - "ethrex-vm/l2", - "zkvm_interface/l2", - "ethrex-blockchain/l2", - "ethrex-l2/l2", -] +l2 = ["zkvm_interface/l2", "ethrex-blockchain/l2", "ethrex-l2/l2"] diff --git a/crates/l2/utils/prover/save_state.rs b/crates/l2/utils/prover/save_state.rs index 4e06a6ae52..f2cd12c86f 100644 --- a/crates/l2/utils/prover/save_state.rs +++ b/crates/l2/utils/prover/save_state.rs @@ -394,7 +394,7 @@ pub fn batch_number_has_all_needed_proofs( #[allow(clippy::expect_used)] mod tests { use ethrex_blockchain::{Blockchain, vm::StoreVmDatabase}; - use ethrex_levm::db::gen_db::GeneralizedDatabase; + use ethrex_levm::{db::gen_db::GeneralizedDatabase, vm::VMType}; use ethrex_storage::{EngineType, Store}; use ethrex_vm::{ DynVmDatabase, @@ -459,7 +459,7 @@ mod tests { let store: DynVmDatabase = Box::new(StoreVmDatabase::new(in_memory_db.clone(), block.hash())); let mut db = GeneralizedDatabase::new(Arc::new(store), CacheDB::new()); - LEVM::execute_block(blocks.last().unwrap(), &mut db)?; + LEVM::execute_block(blocks.last().unwrap(), &mut db, VMType::L2)?; let account_updates = LEVM::get_state_transitions(&mut db)?; account_updates_vec.push(account_updates.clone()); diff --git a/crates/networking/rpc/eth/transaction.rs b/crates/networking/rpc/eth/transaction.rs index 0c29b044cb..92dd62d1ab 100644 --- a/crates/networking/rpc/eth/transaction.rs +++ b/crates/networking/rpc/eth/transaction.rs @@ -20,7 +20,7 @@ use ethrex_common::{ use ethrex_rlp::encode::RLPEncode; use ethrex_storage::Store; -use ethrex_vm::{Evm, ExecutionResult}; +use ethrex_vm::ExecutionResult; use serde::Serialize; use serde_json::Value; @@ -348,7 +348,7 @@ impl RpcHandler for CreateAccessListRequest { }; let vm_db = StoreVmDatabase::new(context.storage.clone(), header.hash()); - let mut vm = Evm::new(context.blockchain.evm_engine, vm_db); + let mut vm = context.blockchain.new_evm(vm_db)?; let chain_config = context.storage.get_chain_config()?; let fork = chain_config.get_fork(header.timestamp); @@ -574,8 +574,8 @@ fn simulate_tx( blockchain: Arc, fork: Fork, ) -> Result { - let db = StoreVmDatabase::new(storage.clone(), block_header.hash()); - let mut vm = Evm::new(blockchain.evm_engine, db); + let vm_db = StoreVmDatabase::new(storage.clone(), block_header.hash()); + let mut vm = blockchain.new_evm(vm_db)?; match vm.simulate_tx_from_generic(transaction, block_header, fork)? 
{ ExecutionResult::Revert { diff --git a/crates/vm/Cargo.toml b/crates/vm/Cargo.toml index 99739c845d..362c8b42d0 100644 --- a/crates/vm/Cargo.toml +++ b/crates/vm/Cargo.toml @@ -42,7 +42,6 @@ path = "./lib.rs" [features] default = [] -l2 = ["ethrex-levm/l2"] c-kzg = ["revm/c-kzg", "ethrex-levm/c-kzg", "ethrex-common/c-kzg"] blst = ["revm/blst"] debug = ["ethrex-levm/debug"] diff --git a/crates/vm/backends/levm/mod.rs b/crates/vm/backends/levm/mod.rs index 13762ae899..5d59a4f8f9 100644 --- a/crates/vm/backends/levm/mod.rs +++ b/crates/vm/backends/levm/mod.rs @@ -17,11 +17,11 @@ use ethrex_common::{ }, }; use ethrex_levm::EVMConfig; -use ethrex_levm::call_frame::CallFrameBackup; use ethrex_levm::constants::{SYS_CALL_GAS_LIMIT, TX_BASE_COST}; use ethrex_levm::db::gen_db::GeneralizedDatabase; use ethrex_levm::errors::{InternalError, TxValidationError}; use ethrex_levm::tracing::LevmCallTracer; +use ethrex_levm::vm::VMType; use ethrex_levm::{ Environment, errors::{ExecutionReport, TxResult, VMError}, @@ -44,8 +44,9 @@ impl LEVM { pub fn execute_block( block: &Block, db: &mut GeneralizedDatabase, + vm_type: VMType, ) -> Result { - Self::prepare_block(block, db)?; + Self::prepare_block(block, db, vm_type.clone())?; let mut receipts = Vec::new(); let mut cumulative_gas_used = 0; @@ -53,7 +54,7 @@ impl LEVM { for (tx, tx_sender) in block.body.get_transactions_with_sender().map_err(|error| { EvmError::Transaction(format!("Couldn't recover addresses with error: {error}")) })? { - let report = Self::execute_tx(tx, tx_sender, &block.header, db)?; + let report = Self::execute_tx(tx, tx_sender, &block.header, db, vm_type.clone())?; cumulative_gas_used += report.gas_used; let receipt = Receipt::new( @@ -70,13 +71,13 @@ impl LEVM { Self::process_withdrawals(db, withdrawals)?; } - cfg_if::cfg_if! { - if #[cfg(not(feature = "l2"))] { - let requests = extract_all_requests_levm(&receipts, db, &block.header)?; - } else { - let requests = Default::default(); - } - } + // TODO: I don't like deciding the behavior based on the VMType here. + // TODO2: Revise this, apparently extract_all_requests_levm is not called + // in L2 execution, but its implementation behaves differently based on this. + let requests = match vm_type { + VMType::L1 => extract_all_requests_levm(&receipts, db, &block.header, vm_type)?, + VMType::L2 => Default::default(), + }; Ok(BlockExecutionResult { receipts, requests }) } @@ -130,39 +131,14 @@ impl LEVM { // The block header for the current block. block_header: &BlockHeader, db: &mut GeneralizedDatabase, + vm_type: VMType, ) -> Result { let env = Self::setup_env(tx, tx_sender, block_header, db)?; - let mut vm = VM::new(env, db, tx, LevmCallTracer::disabled()); + let mut vm = VM::new(env, db, tx, LevmCallTracer::disabled(), vm_type); vm.execute().map_err(VMError::into) } - pub fn execute_tx_l2( - // The transaction to execute. - tx: &Transaction, - // The transactions recovered address - tx_sender: Address, - // The block header for the current block. - block_header: &BlockHeader, - db: &mut GeneralizedDatabase, - ) -> Result<(ExecutionReport, CallFrameBackup), EvmError> { - let env = Self::setup_env(tx, tx_sender, block_header, db)?; - let mut vm = VM::new(env, db, tx, LevmCallTracer::disabled()); - - let report_result = vm.execute().map_err(EvmError::from)?; - - // Here we differ from the execute_tx function from the L1. - // We need to check if the transaction exceeded the blob size limit. - // If it did, we need to revert the state changes made by the transaction and return the error. 
- let call_frame_backup = vm - .call_frames - .pop() - .ok_or(VMError::Internal(InternalError::CallFrame))? - .call_frame_backup; - - Ok((report_result, call_frame_backup)) - } - pub fn undo_last_tx(db: &mut GeneralizedDatabase) -> Result<(), EvmError> { db.undo_last_transaction()?; Ok(()) @@ -174,6 +150,7 @@ impl LEVM { // The block header for the current block. block_header: &BlockHeader, db: &mut GeneralizedDatabase, + vm_type: VMType, ) -> Result { let mut env = env_from_generic(tx, block_header, db)?; @@ -181,7 +158,7 @@ impl LEVM { adjust_disabled_base_fee(&mut env); - let mut vm = vm_from_generic(tx, env, db)?; + let mut vm = vm_from_generic(tx, env, db, vm_type)?; vm.execute() .map(|value| value.into()) @@ -304,7 +281,14 @@ impl LEVM { pub fn beacon_root_contract_call( block_header: &BlockHeader, db: &mut GeneralizedDatabase, + vm_type: VMType, ) -> Result<(), EvmError> { + if let VMType::L2 = vm_type { + return Err(EvmError::InvalidEVM( + "beacon_root_contract_call should not be called for L2 VM".to_string(), + )); + } + let beacon_root = match block_header.parent_beacon_block_root { None => { return Err(EvmError::Header( @@ -320,6 +304,7 @@ impl LEVM { db, *BEACON_ROOTS_ADDRESS, *SYSTEM_ADDRESS, + vm_type, )?; Ok(()) } @@ -327,26 +312,42 @@ impl LEVM { pub fn process_block_hash_history( block_header: &BlockHeader, db: &mut GeneralizedDatabase, + vm_type: VMType, ) -> Result<(), EvmError> { + if let VMType::L2 = vm_type { + return Err(EvmError::InvalidEVM( + "process_block_hash_history should not be called for L2 VM".to_string(), + )); + } + generic_system_contract_levm( block_header, Bytes::copy_from_slice(block_header.parent_hash.as_bytes()), db, *HISTORY_STORAGE_ADDRESS, *SYSTEM_ADDRESS, + vm_type, )?; Ok(()) } pub(crate) fn read_withdrawal_requests( block_header: &BlockHeader, db: &mut GeneralizedDatabase, + vm_type: VMType, ) -> Result { + if let VMType::L2 = vm_type { + return Err(EvmError::InvalidEVM( + "read_withdrawal_requests should not be called for L2 VM".to_string(), + )); + } + let report = generic_system_contract_levm( block_header, Bytes::new(), db, *WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS, *SYSTEM_ADDRESS, + vm_type, )?; // According to EIP-7002 we need to check if the WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS @@ -367,16 +368,25 @@ impl LEVM { ))), } } + pub(crate) fn dequeue_consolidation_requests( block_header: &BlockHeader, db: &mut GeneralizedDatabase, + vm_type: VMType, ) -> Result { + if let VMType::L2 = vm_type { + return Err(EvmError::InvalidEVM( + "dequeue_consolidation_requests should not be called for L2 VM".to_string(), + )); + } + let report = generic_system_contract_levm( block_header, Bytes::new(), db, *CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS, *SYSTEM_ADDRESS, + vm_type, )?; // According to EIP-7251 we need to check if the CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS @@ -402,39 +412,47 @@ impl LEVM { mut tx: GenericTransaction, header: &BlockHeader, db: &mut GeneralizedDatabase, + vm_type: VMType, ) -> Result<(ExecutionResult, AccessList), VMError> { let mut env = env_from_generic(&tx, header, db)?; adjust_disabled_base_fee(&mut env); - let mut vm = vm_from_generic(&tx, env.clone(), db)?; + let mut vm = vm_from_generic(&tx, env.clone(), db, vm_type.clone())?; vm.stateless_execute()?; let access_list = build_access_list(&vm.substate); // Execute the tx again, now with the created access list. 
tx.access_list = access_list.iter().map(|item| item.into()).collect(); - let mut vm = vm_from_generic(&tx, env.clone(), db)?; + let mut vm = vm_from_generic(&tx, env.clone(), db, vm_type)?; let report = vm.stateless_execute()?; Ok((report.into(), access_list)) } - pub fn prepare_block(block: &Block, db: &mut GeneralizedDatabase) -> Result<(), EvmError> { + pub fn prepare_block( + block: &Block, + db: &mut GeneralizedDatabase, + vm_type: VMType, + ) -> Result<(), EvmError> { let chain_config = db.store.get_chain_config()?; let block_header = &block.header; let fork = chain_config.fork(block_header.timestamp); + // TODO: I don't like deciding the behavior based on the VMType here. + if let VMType::L2 = vm_type { + return Ok(()); + } + if block_header.parent_beacon_block_root.is_some() && fork >= Fork::Cancun { - #[cfg(not(feature = "l2"))] - Self::beacon_root_contract_call(block_header, db)?; + Self::beacon_root_contract_call(block_header, db, vm_type.clone())?; } if fork >= Fork::Prague { //eip 2935: stores parent block hash in system contract - #[cfg(not(feature = "l2"))] - Self::process_block_hash_history(block_header, db)?; + Self::process_block_hash_history(block_header, db, vm_type)?; } Ok(()) } @@ -446,6 +464,7 @@ pub fn generic_system_contract_levm( db: &mut GeneralizedDatabase, contract_address: Address, system_address: Address, + vm_type: VMType, ) -> Result { let chain_config = db.store.get_chain_config()?; let config = EVMConfig::new_from_chain_config(&chain_config, block_header); @@ -478,7 +497,7 @@ pub fn generic_system_contract_levm( data: calldata, ..Default::default() }); - let mut vm = VM::new(env, db, tx, LevmCallTracer::disabled()); + let mut vm = VM::new(env, db, tx, LevmCallTracer::disabled(), vm_type); let report = vm.execute().map_err(EvmError::from)?; @@ -507,7 +526,14 @@ pub fn extract_all_requests_levm( receipts: &[Receipt], db: &mut GeneralizedDatabase, header: &BlockHeader, + vm_type: VMType, ) -> Result, EvmError> { + if let VMType::L2 = vm_type { + return Err(EvmError::InvalidEVM( + "extract_all_requests_levm should not be called for L2 VM".to_string(), + )); + } + let chain_config = db.store.get_chain_config()?; let fork = chain_config.fork(header.timestamp); @@ -515,10 +541,13 @@ pub fn extract_all_requests_levm( return Ok(Default::default()); } - let withdrawals_data: Vec = LEVM::read_withdrawal_requests(header, db)?.output.into(); - let consolidation_data: Vec = LEVM::dequeue_consolidation_requests(header, db)? + let withdrawals_data: Vec = LEVM::read_withdrawal_requests(header, db, vm_type.clone())? .output .into(); + let consolidation_data: Vec = + LEVM::dequeue_consolidation_requests(header, db, vm_type.clone())? 
+ .output + .into(); let deposits = Requests::from_deposit_receipts(chain_config.deposit_contract_address, receipts) .ok_or(EvmError::InvalidDepositRequest)?; @@ -605,6 +634,7 @@ fn vm_from_generic<'a>( tx: &GenericTransaction, env: Environment, db: &'a mut GeneralizedDatabase, + vm_type: VMType, ) -> Result, VMError> { let tx = match &tx.authorization_list { Some(authorization_list) => Transaction::EIP7702Transaction(EIP7702Transaction { @@ -639,5 +669,5 @@ fn vm_from_generic<'a>( ..Default::default() }), }; - Ok(VM::new(env, db, &tx, LevmCallTracer::disabled())) + Ok(VM::new(env, db, &tx, LevmCallTracer::disabled(), vm_type)) } diff --git a/crates/vm/backends/levm/tracing.rs b/crates/vm/backends/levm/tracing.rs index f6cf204542..a889858232 100644 --- a/crates/vm/backends/levm/tracing.rs +++ b/crates/vm/backends/levm/tracing.rs @@ -1,5 +1,6 @@ use ethrex_common::types::{Block, Transaction}; use ethrex_common::{tracing::CallTrace, types::BlockHeader}; +use ethrex_levm::vm::VMType; use ethrex_levm::{db::gen_db::GeneralizedDatabase, tracing::LevmCallTracer, vm::VM}; use crate::{EvmError, backends::levm::LEVM}; @@ -11,8 +12,9 @@ impl LEVM { db: &mut GeneralizedDatabase, block: &Block, stop_index: Option, + vm_type: VMType, ) -> Result<(), EvmError> { - Self::prepare_block(block, db)?; + Self::prepare_block(block, db, vm_type.clone())?; // Executes transactions and stops when the index matches the stop index. for (index, (tx, sender)) in block @@ -26,7 +28,7 @@ impl LEVM { break; } - Self::execute_tx(tx, sender, &block.header, db)?; + Self::execute_tx(tx, sender, &block.header, db, vm_type.clone())?; } // Process withdrawals only if the whole block has been executed. @@ -46,6 +48,7 @@ impl LEVM { tx: &Transaction, only_top_call: bool, with_log: bool, + vm_type: VMType, ) -> Result { let env = Self::setup_env( tx, @@ -55,7 +58,13 @@ impl LEVM { block_header, db, )?; - let mut vm = VM::new(env, db, tx, LevmCallTracer::new(only_top_call, with_log)); + let mut vm = VM::new( + env, + db, + tx, + LevmCallTracer::new(only_top_call, with_log), + vm_type, + ); vm.execute()?; diff --git a/crates/vm/backends/mod.rs b/crates/vm/backends/mod.rs index bbcabe85ee..bfc9c6bfd4 100644 --- a/crates/vm/backends/mod.rs +++ b/crates/vm/backends/mod.rs @@ -15,6 +15,7 @@ use ethrex_common::types::{ pub use ethrex_levm::call_frame::CallFrameBackup; use ethrex_levm::db::gen_db::GeneralizedDatabase; use ethrex_levm::db::{CacheDB, Database as LevmDatabase}; +use ethrex_levm::vm::VMType; use levm::LEVM; use revm::REVM; use revm::db::EvmState; @@ -53,8 +54,13 @@ impl TryFrom for EvmEngine { #[allow(clippy::large_enum_variant)] #[derive(Clone)] pub enum Evm { - REVM { state: EvmState }, - LEVM { db: GeneralizedDatabase }, + REVM { + state: EvmState, + }, + LEVM { + db: GeneralizedDatabase, + vm_type: VMType, + }, } impl std::fmt::Debug for Evm { @@ -70,7 +76,7 @@ impl std::fmt::Debug for Evm { impl Evm { /// Creates a new EVM instance, but with block hash in zero, so if we want to execute a block or transaction we have to set it. 
- pub fn new(engine: EvmEngine, db: impl VmDatabase + 'static) -> Self { + pub fn new_for_l1(engine: EvmEngine, db: impl VmDatabase + 'static) -> Self { let wrapped_db: DynVmDatabase = Box::new(db); match engine { @@ -79,20 +85,47 @@ impl Evm { }, EvmEngine::LEVM => Evm::LEVM { db: GeneralizedDatabase::new(Arc::new(wrapped_db), CacheDB::new()), + vm_type: VMType::L1, }, } } - pub fn new_from_db(store: Arc) -> Self { + pub fn new_for_l2(engine: EvmEngine, db: impl VmDatabase + 'static) -> Result { + if let EvmEngine::REVM = engine { + return Err(EvmError::InvalidEVM( + "REVM is not supported for L2".to_string(), + )); + } + + let wrapped_db: DynVmDatabase = Box::new(db); + + let evm = Evm::LEVM { + db: GeneralizedDatabase::new(Arc::new(wrapped_db), CacheDB::new()), + vm_type: VMType::L2, + }; + + Ok(evm) + } + + pub fn new_from_db_for_l1(store: Arc) -> Self { + Self::_new_from_db(store, VMType::L1) + } + + pub fn new_from_db_for_l2(store: Arc) -> Self { + Self::_new_from_db(store, VMType::L2) + } + + fn _new_from_db(store: Arc, vm_type: VMType) -> Self { Evm::LEVM { db: GeneralizedDatabase::new(store, CacheDB::new()), + vm_type, } } pub fn execute_block(&mut self, block: &Block) -> Result { match self { Evm::REVM { state } => REVM::execute_block(block, state), - Evm::LEVM { db } => LEVM::execute_block(block, db), + Evm::LEVM { db, vm_type } => LEVM::execute_block(block, db, vm_type.clone()), } } @@ -128,8 +161,9 @@ impl Evm { Ok((receipt, execution_result.gas_used())) } - Evm::LEVM { db } => { - let execution_report = LEVM::execute_tx(tx, sender, block_header, db)?; + Evm::LEVM { db, vm_type } => { + let execution_report = + LEVM::execute_tx(tx, sender, block_header, db, vm_type.clone())?; *remaining_gas = remaining_gas.saturating_sub(execution_report.gas_used); @@ -150,7 +184,7 @@ impl Evm { Evm::REVM { .. } => Err(EvmError::InvalidEVM( "Undoing transaction not supported in REVM".to_string(), )), - Evm::LEVM { db } => LEVM::undo_last_tx(db), + Evm::LEVM { db, .. } => LEVM::undo_last_tx(db), } } @@ -172,16 +206,16 @@ impl Evm { Ok(()) } - Evm::LEVM { db } => { + Evm::LEVM { db, vm_type } => { let chain_config = db.store.get_chain_config()?; let fork = chain_config.fork(block_header.timestamp); if block_header.parent_beacon_block_root.is_some() && fork >= Fork::Cancun { - LEVM::beacon_root_contract_call(block_header, db)?; + LEVM::beacon_root_contract_call(block_header, db, vm_type.clone())?; } if fork >= Fork::Prague { - LEVM::process_block_hash_history(block_header, db)?; + LEVM::process_block_hash_history(block_header, db, vm_type.clone())?; } Ok(()) @@ -200,7 +234,7 @@ impl Evm { pub fn get_state_transitions(&mut self) -> Result, EvmError> { match self { Evm::REVM { state } => Ok(REVM::get_state_transitions(state)), - Evm::LEVM { db } => LEVM::get_state_transitions(db), + Evm::LEVM { db, .. } => LEVM::get_state_transitions(db), } } @@ -209,7 +243,7 @@ impl Evm { pub fn process_withdrawals(&mut self, withdrawals: &[Withdrawal]) -> Result<(), EvmError> { match self { Evm::REVM { state } => REVM::process_withdrawals(state, withdrawals), - Evm::LEVM { db } => LEVM::process_withdrawals(db, withdrawals), + Evm::LEVM { db, .. 
} => LEVM::process_withdrawals(db, withdrawals), } } @@ -219,7 +253,9 @@ impl Evm { header: &BlockHeader, ) -> Result, EvmError> { match self { - Evm::LEVM { db } => levm::extract_all_requests_levm(receipts, db, header), + Evm::LEVM { db, vm_type } => { + levm::extract_all_requests_levm(receipts, db, header, vm_type.clone()) + } Evm::REVM { state } => revm::extract_all_requests(receipts, state, header), } } @@ -235,7 +271,9 @@ impl Evm { let spec_id = fork_to_spec_id(fork); self::revm::helpers::simulate_tx_from_generic(tx, header, state, spec_id) } - Evm::LEVM { db } => LEVM::simulate_tx_from_generic(tx, header, db), + Evm::LEVM { db, vm_type } => { + LEVM::simulate_tx_from_generic(tx, header, db, vm_type.clone()) + } } } @@ -251,7 +289,9 @@ impl Evm { self::revm::helpers::create_access_list(tx, header, state, spec_id)? } - Evm::LEVM { db } => LEVM::create_access_list(tx.clone(), header, db)?, + Evm::LEVM { db, vm_type } => { + LEVM::create_access_list(tx.clone(), header, db, vm_type.clone())? + } }; match result { ( diff --git a/crates/vm/backends/revm/mod.rs b/crates/vm/backends/revm/mod.rs index e3654016cb..32a9c1bb71 100644 --- a/crates/vm/backends/revm/mod.rs +++ b/crates/vm/backends/revm/mod.rs @@ -58,17 +58,14 @@ impl REVM { &state.inner.database.get_chain_config()?, block_header.timestamp, ); - cfg_if::cfg_if! { - if #[cfg(not(feature = "l2"))] { - if block_header.parent_beacon_block_root.is_some() && spec_id >= SpecId::CANCUN { - Self::beacon_root_contract_call(block_header, state)?; - } - //eip 2935: stores parent block hash in system contract - if spec_id >= SpecId::PRAGUE { - Self::process_block_hash_history(block_header, state)?; - } - } + if block_header.parent_beacon_block_root.is_some() && spec_id >= SpecId::CANCUN { + Self::beacon_root_contract_call(block_header, state)?; + } + + //eip 2935: stores parent block hash in system contract + if spec_id >= SpecId::PRAGUE { + Self::process_block_hash_history(block_header, state)?; } let mut receipts = Vec::new(); @@ -93,13 +90,7 @@ impl REVM { Self::process_withdrawals(state, withdrawals)?; } - cfg_if::cfg_if! { - if #[cfg(not(feature = "l2"))] { - let requests = extract_all_requests(&receipts, state, block_header)?; - } else { - let requests = Default::default(); - } - } + let requests = extract_all_requests(&receipts, state, block_header)?; Ok(BlockExecutionResult { receipts, requests }) } @@ -671,12 +662,6 @@ pub fn extract_all_requests( return Ok(Default::default()); } - cfg_if::cfg_if! { - if #[cfg(feature = "l2")] { - return Ok(Default::default()); - } - } - let deposits = Requests::from_deposit_receipts(config.deposit_contract_address, receipts) .ok_or(EvmError::InvalidDepositRequest)?; let withdrawals_data = REVM::read_withdrawal_requests(header, state)?; diff --git a/crates/vm/backends/revm/tracing.rs b/crates/vm/backends/revm/tracing.rs index 87fef13912..f3cd4b28a5 100644 --- a/crates/vm/backends/revm/tracing.rs +++ b/crates/vm/backends/revm/tracing.rs @@ -47,16 +47,14 @@ impl REVM { ) -> Result<(), EvmError> { let spec_id: SpecId = spec_id(&state.chain_config()?, block.header.timestamp); let block_env = block_env(&block.header, spec_id); - cfg_if::cfg_if! 
{ - if #[cfg(not(feature = "l2"))] { - if block.header.parent_beacon_block_root.is_some() && spec_id >= SpecId::CANCUN { - Self::beacon_root_contract_call(&block.header, state)?; - } - //eip 2935: stores parent block hash in system contract - if spec_id >= SpecId::PRAGUE { - Self::process_block_hash_history(&block.header, state)?; - } - } + + if block.header.parent_beacon_block_root.is_some() && spec_id >= SpecId::CANCUN { + Self::beacon_root_contract_call(&block.header, state)?; + } + + //eip 2935: stores parent block hash in system contract + if spec_id >= SpecId::PRAGUE { + Self::process_block_hash_history(&block.header, state)?; } for (index, (tx, sender)) in block diff --git a/crates/vm/levm/Cargo.toml b/crates/vm/levm/Cargo.toml index 835b69db3d..80a5961ded 100644 --- a/crates/vm/levm/Cargo.toml +++ b/crates/vm/levm/Cargo.toml @@ -43,7 +43,6 @@ spinoff = "0.8.0" default = [] c-kzg = ["ethrex-common/c-kzg"] ethereum_foundation_tests = [] -l2 = [] debug = [] [lints.rust] diff --git a/crates/vm/levm/bench/revm_comparison/src/levm_bench.rs b/crates/vm/levm/bench/revm_comparison/src/levm_bench.rs index 7577b23126..fd1a56aa6f 100644 --- a/crates/vm/levm/bench/revm_comparison/src/levm_bench.rs +++ b/crates/vm/levm/bench/revm_comparison/src/levm_bench.rs @@ -6,7 +6,11 @@ use ethrex_common::{ types::{Account, EIP1559Transaction, Transaction, TxKind}, }; use ethrex_levm::{ - Environment, db::gen_db::GeneralizedDatabase, errors::TxResult, tracing::LevmCallTracer, vm::VM, + Environment, + db::gen_db::GeneralizedDatabase, + errors::TxResult, + tracing::LevmCallTracer, + vm::{VM, VMType}, }; use ethrex_storage::Store; use ethrex_vm::DynVmDatabase; @@ -76,5 +80,5 @@ fn init_vm(db: &mut GeneralizedDatabase, nonce: u64, calldata: Bytes) -> VM { data: calldata, ..Default::default() }); - VM::new(env, db, &tx, LevmCallTracer::disabled()) + VM::new(env, db, &tx, LevmCallTracer::disabled(), VMType::L1) } diff --git a/crates/vm/levm/src/gas_cost.rs b/crates/vm/levm/src/gas_cost.rs index 4c65ebe9a3..d348c70fe7 100644 --- a/crates/vm/levm/src/gas_cost.rs +++ b/crates/vm/levm/src/gas_cost.rs @@ -177,8 +177,6 @@ pub const BLS12_381_MAP_FP_TO_G1_COST: u64 = 5500; pub const BLS12_PAIRING_CHECK_MUL_COST: u64 = 32600; pub const BLS12_PAIRING_CHECK_FIXED_COST: u64 = 37700; pub const BLS12_381_MAP_FP2_TO_G2_COST: u64 = 23800; -#[cfg(feature = "l2")] -pub const P256VERIFY_COST: u64 = 3450; // Floor cost per token, specified in https://eips.ethereum.org/EIPS/eip-7623 pub const TOTAL_COST_FLOOR_PER_TOKEN: u64 = 10; diff --git a/crates/vm/levm/src/hooks/hook.rs b/crates/vm/levm/src/hooks/hook.rs index d24054b0a8..023a2a5ada 100644 --- a/crates/vm/levm/src/hooks/hook.rs +++ b/crates/vm/levm/src/hooks/hook.rs @@ -1,10 +1,9 @@ use std::{cell::RefCell, rc::Rc}; -use ethrex_common::types::Transaction; - use crate::{ errors::{ContextResult, VMError}, - vm::VM, + hooks::{L2Hook, backup_hook::BackupHook, default_hook::DefaultHook}, + vm::{VM, VMType}, }; pub trait Hook { @@ -17,20 +16,20 @@ pub trait Hook { ) -> Result<(), VMError>; } -pub fn get_hooks(_tx: &Transaction) -> Vec>> { - #[cfg(not(feature = "l2"))] - { - use crate::hooks::default_hook::DefaultHook; - vec![Rc::new(RefCell::new(DefaultHook))] +pub fn get_hooks(vm_type: &VMType) -> Vec>> { + match vm_type { + VMType::L1 => l1_hooks(), + VMType::L2 => l2_hooks(), } +} - #[cfg(feature = "l2")] - { - use crate::hooks::{L2Hook, backup_hook::BackupHook}; +pub fn l1_hooks() -> Vec>> { + vec![Rc::new(RefCell::new(DefaultHook))] +} - vec![ - Rc::new(RefCell::new(L2Hook {})), 
- Rc::new(RefCell::new(BackupHook::default())), - ] - } +pub fn l2_hooks() -> Vec>> { + vec![ + Rc::new(RefCell::new(L2Hook {})), + Rc::new(RefCell::new(BackupHook::default())), + ] } diff --git a/crates/vm/levm/src/hooks/mod.rs b/crates/vm/levm/src/hooks/mod.rs index 6eb6e77b4d..29ac5fbc2d 100644 --- a/crates/vm/levm/src/hooks/mod.rs +++ b/crates/vm/levm/src/hooks/mod.rs @@ -1,9 +1,7 @@ pub mod backup_hook; pub mod default_hook; pub mod hook; -#[cfg(feature = "l2")] pub mod l2_hook; pub use default_hook::DefaultHook; -#[cfg(feature = "l2")] pub use l2_hook::L2Hook; diff --git a/crates/vm/levm/src/l2_precompiles.rs b/crates/vm/levm/src/l2_precompiles.rs new file mode 100644 index 0000000000..592cb86b7c --- /dev/null +++ b/crates/vm/levm/src/l2_precompiles.rs @@ -0,0 +1,142 @@ +use bytes::Bytes; +use ethrex_common::{Address, H160, types::Fork}; +use p256::{ + EncodedPoint, FieldElement as P256FieldElement, NistP256, + ecdsa::{Signature as P256Signature, VerifyingKey, signature::hazmat::PrehashVerifier}, + elliptic_curve::{Curve, bigint::U256 as P256Uint, ff::PrimeField}, +}; + +use crate::{ + errors::{InternalError, PrecompileError, VMError}, + precompiles::{self}, +}; + +pub const P256VERIFY_ADDRESS: Address = H160([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x00, +]); + +pub const RIP_PRECOMPILES: [H160; 1] = [P256VERIFY_ADDRESS]; + +// Secp256r1 curve parameters +// See https://neuromancer.sk/std/secg/secp256r1 +const P256_P: P256Uint = P256Uint::from_be_hex(P256FieldElement::MODULUS); +const P256_N: P256Uint = NistP256::ORDER; +const P256_A: P256FieldElement = P256FieldElement::from_u64(3).neg(); +const P256_B_UINT: P256Uint = + P256Uint::from_be_hex("5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b"); +lazy_static::lazy_static! { + static ref P256_B: P256FieldElement = P256FieldElement::from_uint(P256_B_UINT).unwrap(); +} + +pub const P256VERIFY_COST: u64 = 3450; + +pub fn execute_precompile( + address: Address, + calldata: &Bytes, + gas_remaining: &mut u64, +) -> Result { + let result = match address { + address if address == P256VERIFY_ADDRESS => p_256_verify(calldata, gas_remaining)?, + _ => return precompiles::execute_precompile(address, calldata, gas_remaining), + }; + Ok(result) +} + +pub fn is_precompile(address: &Address, fork: Fork) -> bool { + precompiles::is_precompile(address, fork) || RIP_PRECOMPILES.contains(address) +} + +/// Signature verification in the “secp256r1” elliptic curve +/// If the verification succeeds, returns 1 in a 32-bit big-endian format. +/// If the verification fails, returns an empty `Bytes` object. +/// Implemented following https://github.com/ethereum/RIPs/blob/89474e2b9dbd066fac9446c8cd280651bda35849/RIPS/rip-7212.md?plain=1#L1. 
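Editorial note: the precompile that follows parses a fixed 160-byte input, taking the 32-byte message hash and then `r`, `s` and the public-key coordinates `x` and `y` from consecutive 32-byte slots. A minimal sketch of how such an input could be packed, e.g. for a test; the `encode_p256_input` helper is illustrative and not part of this patch:

```rust
use bytes::Bytes;

/// Illustrative helper: packs the RIP-7212 input layout expected by `p_256_verify`.
/// Offsets mirror the parsing below: [0..32] hash, [32..64] r, [64..96] s,
/// [96..128] x, [128..160] y.
fn encode_p256_input(hash: [u8; 32], r: [u8; 32], s: [u8; 32], x: [u8; 32], y: [u8; 32]) -> Bytes {
    let mut input = Vec::with_capacity(160);
    for part in [hash, r, s, x, y] {
        input.extend_from_slice(&part);
    }
    Bytes::from(input)
}
```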
+pub fn p_256_verify(calldata: &Bytes, gas_remaining: &mut u64) -> Result { + let gas_cost = P256VERIFY_COST; + precompiles::increase_precompile_consumed_gas(gas_cost, gas_remaining)?; + + // If calldata does not reach the required length, we should fill the rest with zeros + let calldata = precompiles::fill_with_zeros(calldata, 160); + + // Parse parameters + let message_hash = calldata + .get(0..32) + .ok_or(PrecompileError::ParsingInputError)?; + let r = calldata + .get(32..64) + .ok_or(PrecompileError::ParsingInputError)?; + let s = calldata + .get(64..96) + .ok_or(PrecompileError::ParsingInputError)?; + let x = calldata + .get(96..128) + .ok_or(PrecompileError::ParsingInputError)?; + let y = calldata + .get(128..160) + .ok_or(PrecompileError::ParsingInputError)?; + + if !validate_p256_parameters(r, s, x, y)? { + return Ok(Bytes::new()); + } + + // Build verifier + let Ok(verifier) = VerifyingKey::from_encoded_point(&EncodedPoint::from_affine_coordinates( + x.into(), + y.into(), + false, + )) else { + return Ok(Bytes::new()); + }; + + // Build signature + let r: [u8; 32] = r.try_into().map_err(|_| InternalError::Slicing)?; + let s: [u8; 32] = s.try_into().map_err(|_| InternalError::Slicing)?; + + let Ok(signature) = P256Signature::from_scalars(r, s) else { + return Ok(Bytes::new()); + }; + + // Verify message signature + let success = verifier.verify_prehash(message_hash, &signature).is_ok(); + + // If the verification succeeds, returns 1 in a 32-bit big-endian format. + // If the verification fails, returns an empty `Bytes` object. + if success { + let mut result = [0; 32]; + result[31] = 1; + Ok(Bytes::from(result.to_vec())) + } else { + Ok(Bytes::new()) + } +} + +/// Following https://github.com/ethereum/RIPs/blob/89474e2b9dbd066fac9446c8cd280651bda35849/RIPS/rip-7212.md?plain=1#L86 +fn validate_p256_parameters(r: &[u8], s: &[u8], x: &[u8], y: &[u8]) -> Result { + let [r, s, x, y] = [r, s, x, y].map(P256Uint::from_be_slice); + + // Verify that the r and s values are in (0, n) (exclusive) + if r == P256Uint::ZERO || r >= P256_N || s == P256Uint::ZERO || s >= P256_N { + return Ok(false); + } + + // Verify that both x and y are in [0, p) (inclusive 0, exclusive p) + if x >= P256_P || y >= P256_P { + return Ok(false); + } + + // Verify that the point formed by (x, y) is on the curve + let x: Option = P256FieldElement::from_uint(x).into(); + let y: Option = P256FieldElement::from_uint(y).into(); + + let (Some(x), Some(y)) = (x, y) else { + return Err(InternalError::Slicing.into()); + }; + + // Curve equation: `y² = x³ + ax + b` + let a_x = P256_A.multiply(&x); + if y.square() == x.pow_vartime(&[3u64]).add(&a_x).add(&P256_B) { + return Ok(true); + } + + Ok(false) +} diff --git a/crates/vm/levm/src/lib.rs b/crates/vm/levm/src/lib.rs index 39530933bd..619ca4874e 100644 --- a/crates/vm/levm/src/lib.rs +++ b/crates/vm/levm/src/lib.rs @@ -15,3 +15,4 @@ pub mod tracing; pub mod utils; pub mod vm; pub use environment::*; +pub mod l2_precompiles; diff --git a/crates/vm/levm/src/precompiles.rs b/crates/vm/levm/src/precompiles.rs index dd2104331f..b9681f0d82 100644 --- a/crates/vm/levm/src/precompiles.rs +++ b/crates/vm/levm/src/precompiles.rs @@ -33,38 +33,14 @@ use lambdaworks_math::{ unsigned_integer::element, }; use num_bigint::BigUint; -#[cfg(feature = "l2")] -use p256::{ - EncodedPoint, FieldElement as P256FieldElement, NistP256, - ecdsa::{Signature as P256Signature, VerifyingKey, signature::hazmat::PrehashVerifier}, - elliptic_curve::{Curve, bigint::U256 as P256Uint, ff::PrimeField}, -}; use 
secp256k1::{ Message, ecdsa::{RecoverableSignature, RecoveryId}, }; -// Secp256r1 curve parameters -// See https://neuromancer.sk/std/secg/secp256r1 -#[cfg(feature = "l2")] -const P256_P: P256Uint = P256Uint::from_be_hex(P256FieldElement::MODULUS); -#[cfg(feature = "l2")] -const P256_N: P256Uint = NistP256::ORDER; -#[cfg(feature = "l2")] -const P256_A: P256FieldElement = P256FieldElement::from_u64(3).neg(); -#[cfg(feature = "l2")] -const P256_B_UINT: P256Uint = - P256Uint::from_be_hex("5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b"); -#[cfg(feature = "l2")] -lazy_static::lazy_static! { - static ref P256_B: P256FieldElement = P256FieldElement::from_uint(P256_B_UINT).unwrap(); -} - use sha3::Digest; use std::ops::Mul; -#[cfg(feature = "l2")] -use crate::gas_cost::P256VERIFY_COST; use crate::{ constants::VERSIONED_HASH_VERSION_KZG, errors::{ExceptionalHalt, InternalError, PrecompileError, VMError}, @@ -145,12 +121,6 @@ pub const BLS12_MAP_FP2_TO_G2_ADDRESS: H160 = H160([ 0x00, 0x00, 0x00, 0x11, ]); -#[cfg(feature = "l2")] -pub const P256VERIFY_ADDRESS: H160 = H160([ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x01, 0x00, -]); - pub const PRECOMPILES: [H160; 10] = [ ECRECOVER_ADDRESS, SHA2_256_ADDRESS, @@ -174,9 +144,6 @@ pub const PRECOMPILES_POST_CANCUN: [H160; 7] = [ BLS12_MAP_FP2_TO_G2_ADDRESS, ]; -#[cfg(feature = "l2")] -pub const RIP_PRECOMPILES: [H160; 1] = [P256VERIFY_ADDRESS]; - pub const BLAKE2F_ELEMENT_SIZE: usize = 8; pub const SIZE_PRECOMPILES_PRE_CANCUN: u64 = 9; @@ -224,11 +191,6 @@ pub fn is_precompile(address: &Address, fork: Fork) -> bool { return false; } - #[cfg(feature = "l2")] - if RIP_PRECOMPILES.contains(address) { - return true; - } - PRECOMPILES.contains(address) || PRECOMPILES_POST_CANCUN.contains(address) } @@ -263,8 +225,6 @@ pub fn execute_precompile( address if address == BLS12_MAP_FP2_TO_G2_ADDRESS => { bls12_map_fp2_tp_g2(calldata, gas_remaining)? } - #[cfg(feature = "l2")] - address if address == P256VERIFY_ADDRESS => p_256_verify(calldata, gas_remaining)?, _ => return Err(InternalError::InvalidPrecompileAddress.into()), }; @@ -272,7 +232,10 @@ pub fn execute_precompile( } /// Consumes gas and if it's higher than the gas limit returns an error. -fn increase_precompile_consumed_gas(gas_cost: u64, gas_remaining: &mut u64) -> Result<(), VMError> { +pub(crate) fn increase_precompile_consumed_gas( + gas_cost: u64, + gas_remaining: &mut u64, +) -> Result<(), VMError> { *gas_remaining = gas_remaining .checked_sub(gas_cost) .ok_or(PrecompileError::NotEnoughGas)?; @@ -281,7 +244,7 @@ fn increase_precompile_consumed_gas(gas_cost: u64, gas_remaining: &mut u64) -> R /// When slice length is less than `target_len`, the rest is filled with zeros. If slice length is /// more than `target_len`, the excess bytes are discarded. -fn fill_with_zeros(calldata: &Bytes, target_len: usize) -> Bytes { +pub(crate) fn fill_with_zeros(calldata: &Bytes, target_len: usize) -> Bytes { let mut padded_calldata = calldata.to_vec(); if padded_calldata.len() < target_len { padded_calldata.resize(target_len, 0); @@ -1584,99 +1547,3 @@ fn parse_scalar(scalar_raw_bytes: Option<&[u8]>) -> Result { scalar_le.reverse(); Ok(Scalar::from_raw(scalar_le)) } - -#[cfg(feature = "l2")] -/// Signature verification in the “secp256r1” elliptic curve -/// If the verification succeeds, returns 1 in a 32-bit big-endian format. -/// If the verification fails, returns an empty `Bytes` object. 
-/// Implemented following https://github.com/ethereum/RIPs/blob/89474e2b9dbd066fac9446c8cd280651bda35849/RIPS/rip-7212.md?plain=1#L1. -pub fn p_256_verify(calldata: &Bytes, gas_remaining: &mut u64) -> Result { - let gas_cost = P256VERIFY_COST; - increase_precompile_consumed_gas(gas_cost, gas_remaining)?; - - // If calldata does not reach the required length, we should fill the rest with zeros - let calldata = fill_with_zeros(calldata, 160); - - // Parse parameters - let message_hash = calldata - .get(0..32) - .ok_or(PrecompileError::ParsingInputError)?; - let r = calldata - .get(32..64) - .ok_or(PrecompileError::ParsingInputError)?; - let s = calldata - .get(64..96) - .ok_or(PrecompileError::ParsingInputError)?; - let x = calldata - .get(96..128) - .ok_or(PrecompileError::ParsingInputError)?; - let y = calldata - .get(128..160) - .ok_or(PrecompileError::ParsingInputError)?; - - if !validate_p256_parameters(r, s, x, y)? { - return Ok(Bytes::new()); - } - - // Build verifier - let Ok(verifier) = VerifyingKey::from_encoded_point(&EncodedPoint::from_affine_coordinates( - x.into(), - y.into(), - false, - )) else { - return Ok(Bytes::new()); - }; - - // Build signature - let r: [u8; 32] = r.try_into().map_err(|_| InternalError::Slicing)?; - let s: [u8; 32] = s.try_into().map_err(|_| InternalError::Slicing)?; - - let Ok(signature) = P256Signature::from_scalars(r, s) else { - return Ok(Bytes::new()); - }; - - // Verify message signature - let success = verifier.verify_prehash(message_hash, &signature).is_ok(); - - // If the verification succeeds, returns 1 in a 32-bit big-endian format. - // If the verification fails, returns an empty `Bytes` object. - if success { - let mut result = [0; 32]; - result[31] = 1; - Ok(Bytes::from(result.to_vec())) - } else { - Ok(Bytes::new()) - } -} - -#[cfg(feature = "l2")] -/// Following https://github.com/ethereum/RIPs/blob/89474e2b9dbd066fac9446c8cd280651bda35849/RIPS/rip-7212.md?plain=1#L86 -fn validate_p256_parameters(r: &[u8], s: &[u8], x: &[u8], y: &[u8]) -> Result { - let [r, s, x, y] = [r, s, x, y].map(P256Uint::from_be_slice); - - // Verify that the r and s values are in (0, n) (exclusive) - if r == P256Uint::ZERO || r >= P256_N || s == P256Uint::ZERO || s >= P256_N { - return Ok(false); - } - - // Verify that both x and y are in [0, p) (inclusive 0, exclusive p) - if x >= P256_P || y >= P256_P { - return Ok(false); - } - - // Verify that the point formed by (x, y) is on the curve - let x: Option = P256FieldElement::from_uint(x).into(); - let y: Option = P256FieldElement::from_uint(y).into(); - - let (Some(x), Some(y)) = (x, y) else { - return Err(InternalError::Slicing.into()); - }; - - // Curve equation: `y² = x³ + ax + b` - let a_x = P256_A.multiply(&x); - if y.square() == x.pow_vartime(&[3u64]).add(&a_x).add(&P256_B) { - return Ok(true); - } - - Ok(false) -} diff --git a/crates/vm/levm/src/utils.rs b/crates/vm/levm/src/utils.rs index e2719151cd..b659bda7c5 100644 --- a/crates/vm/levm/src/utils.rs +++ b/crates/vm/levm/src/utils.rs @@ -9,12 +9,12 @@ use crate::{ COLD_ADDRESS_ACCESS_COST, CREATE_BASE_COST, STANDARD_TOKEN_COST, TOTAL_COST_FLOOR_PER_TOKEN, WARM_ADDRESS_ACCESS_COST, fake_exponential, }, + l2_precompiles, opcodes::Opcode, precompiles::{ - SIZE_PRECOMPILES_CANCUN, SIZE_PRECOMPILES_PRAGUE, SIZE_PRECOMPILES_PRE_CANCUN, - is_precompile, + self, SIZE_PRECOMPILES_CANCUN, SIZE_PRECOMPILES_PRAGUE, SIZE_PRECOMPILES_PRE_CANCUN, }, - vm::{Substate, VM}, + vm::{Substate, VM, VMType}, }; use ExceptionalHalt::OutOfGas; use bytes::Bytes; @@ -554,7 +554,10 
@@ impl<'a> VM<'a> { } pub fn is_precompile(&self, address: &Address) -> bool { - is_precompile(address, self.env.config.fork) + match self.vm_type { + VMType::L1 => precompiles::is_precompile(address, self.env.config.fork), + VMType::L2 => l2_precompiles::is_precompile(address, self.env.config.fork), + } } /// Backup of Substate, a copy of the current substate to restore if sub-context is reverted diff --git a/crates/vm/levm/src/vm.rs b/crates/vm/levm/src/vm.rs index 2c77fd5f3d..43bba22964 100644 --- a/crates/vm/levm/src/vm.rs +++ b/crates/vm/levm/src/vm.rs @@ -9,7 +9,7 @@ use crate::{ backup_hook::BackupHook, hook::{Hook, get_hooks}, }, - precompiles::execute_precompile, + l2_precompiles, precompiles, tracing::LevmCallTracer, }; use bytes::Bytes; @@ -26,6 +26,13 @@ use std::{ pub type Storage = HashMap; +#[derive(Debug, Clone, Default)] +pub enum VMType { + #[default] + L1, + L2, +} + #[derive(Debug, Clone, Default)] /// Information that changes during transaction execution pub struct Substate { @@ -52,6 +59,7 @@ pub struct VM<'a> { pub tracer: LevmCallTracer, /// Mode for printing some useful stuff, only used in development! pub debug_mode: DebugMode, + pub vm_type: VMType, } impl<'a> VM<'a> { @@ -60,8 +68,8 @@ impl<'a> VM<'a> { db: &'a mut GeneralizedDatabase, tx: &Transaction, tracer: LevmCallTracer, + vm_type: VMType, ) -> Self { - let hooks = get_hooks(tx); db.tx_backup = None; // If BackupHook is enabled, it will contain backup at the end of tx execution. Self { @@ -70,11 +78,12 @@ impl<'a> VM<'a> { substate: Substate::default(), db, tx: tx.clone(), - hooks, + hooks: get_hooks(&vm_type), substate_backups: vec![], storage_original_values: HashMap::new(), tracer, debug_mode: DebugMode::disabled(), + vm_type, } } @@ -192,14 +201,21 @@ impl<'a> VM<'a> { /// Executes precompile and handles the output that it returns, generating a report. pub fn execute_precompile(&mut self) -> Result { + let vm_type = self.vm_type.clone(); + let callframe = self.current_call_frame_mut()?; - let precompile_result = { - execute_precompile( + let precompile_result = match vm_type { + VMType::L1 => precompiles::execute_precompile( + callframe.code_address, + &callframe.calldata, + &mut callframe.gas_remaining, + ), + VMType::L2 => l2_precompiles::execute_precompile( callframe.code_address, &callframe.calldata, &mut callframe.gas_remaining, - ) + ), }; let ctx_result = self.handle_precompile_result(precompile_result)?; diff --git a/crates/vm/levm/tests/tests.rs b/crates/vm/levm/tests/tests.rs index fc998e0a99..c676c1505e 100644 --- a/crates/vm/levm/tests/tests.rs +++ b/crates/vm/levm/tests/tests.rs @@ -2,9 +2,8 @@ #![allow(clippy::unwrap_used)] use bytes::Bytes; +use ethrex_levm::l2_precompiles::p_256_verify; use ethrex_levm::precompiles::bls12_pairing_check; -#[cfg(feature = "l2")] -use ethrex_levm::precompiles::p_256_verify; #[test] fn pairing_infinity() { @@ -37,13 +36,10 @@ fn pairing_infinity() { assert_eq!(result.unwrap(), zero); } -#[cfg(feature = "l2")] use serde::Deserialize; -#[cfg(feature = "l2")] use std::fs; -#[cfg(feature = "l2")] #[derive(Debug, Deserialize)] struct P256TestCase { input: String, @@ -52,7 +48,6 @@ struct P256TestCase { name: String, } -#[cfg(feature = "l2")] #[test] fn p_256_verify_test() { // Taken from https://github.com/ulerdogan/go-ethereum/tree/ulerdogan-secp256r1. 
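Editorial note: with the `l2` cargo feature gone, the choice between L1 and L2 behaviour is made once, when the VM is constructed, and hooks and the precompile table are then selected at runtime from the stored `vm_type`. A minimal sketch of a call site after this change, using the `VM::new` signature shown above; the `build_vm` wrapper itself is illustrative, not part of the patch:

```rust
use ethrex_common::types::Transaction;
use ethrex_levm::{
    Environment,
    db::gen_db::GeneralizedDatabase,
    tracing::LevmCallTracer,
    vm::{VM, VMType},
};

// Illustrative wrapper: the only difference between an L1 and an L2 LEVM instance
// is the `VMType` passed at construction time.
fn build_vm<'a>(
    env: Environment,
    db: &'a mut GeneralizedDatabase,
    tx: &Transaction,
    is_l2: bool,
) -> VM<'a> {
    let vm_type = if is_l2 { VMType::L2 } else { VMType::L1 };
    VM::new(env, db, tx, LevmCallTracer::disabled(), vm_type)
}
```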
@@ -64,7 +59,8 @@ fn p_256_verify_test() { for test in tests { let calldata = hex::decode(&test.input).unwrap(); let calldata = Bytes::from(calldata); - let mut remaining_gas = 10000; + let initial_remaining_gas = 10000; + let mut remaining_gas = initial_remaining_gas; let result = p_256_verify(&calldata, &mut remaining_gas).unwrap(); let expected_result = Bytes::from(hex::decode(&test.expected).unwrap()); assert_eq!( @@ -73,7 +69,8 @@ fn p_256_verify_test() { test.name ); assert_eq!( - remaining_gas, test.gas, + initial_remaining_gas - remaining_gas, + test.gas, "Gas assertion failed on test: {}.", test.name ); diff --git a/crates/vm/tracing.rs b/crates/vm/tracing.rs index 645998dc1b..1b47cdbb96 100644 --- a/crates/vm/tracing.rs +++ b/crates/vm/tracing.rs @@ -28,9 +28,14 @@ impl Evm { Evm::REVM { state } => { REVM::trace_tx_calls(&block.header, tx, state, only_top_call, with_log) } - Evm::LEVM { db } => { - LEVM::trace_tx_calls(db, &block.header, tx, only_top_call, with_log) - } + Evm::LEVM { db, vm_type } => LEVM::trace_tx_calls( + db, + &block.header, + tx, + only_top_call, + with_log, + vm_type.clone(), + ), } } @@ -45,7 +50,7 @@ impl Evm { ) -> Result<(), EvmError> { match self { Evm::REVM { state } => REVM::rerun_block(block, state, stop_index), - Evm::LEVM { db } => LEVM::rerun_block(db, block, stop_index), + Evm::LEVM { db, vm_type } => LEVM::rerun_block(db, block, stop_index, vm_type.clone()), } } } From b05bfe0afa27900d265e562b97fba64f7f7da3ec Mon Sep 17 00:00:00 2001 From: Mario Rugiero Date: Mon, 30 Jun 2025 18:29:01 -0300 Subject: [PATCH 31/40] fix(core): more accurate throughput (#3412) Throughput in the logged metrics was computed over a truncated number of seconds, which meant the same block taking 1999ms or 1000ms reports the same throughput, when one is indeed twice as slow as the other. This fixes it by asking for the `as_secs_f64` directly rather than taking an integer number of millis, dividing (with integer semantics) by 1000 and then casting. --- crates/blockchain/blockchain.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/crates/blockchain/blockchain.rs b/crates/blockchain/blockchain.rs index 11027f5808..7ca7875039 100644 --- a/crates/blockchain/blockchain.rs +++ b/crates/blockchain/blockchain.rs @@ -31,7 +31,7 @@ use mempool::Mempool; use std::collections::HashMap; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Mutex}; -use std::{ops::Div, time::Instant}; +use std::time::Instant; use tokio_util::sync::CancellationToken; use vm::StoreVmDatabase; @@ -543,11 +543,11 @@ impl Blockchain { .await .map_err(|e| (e.into(), None))?; - let elapsed_seconds = interval.elapsed().as_millis() / 1000; + let elapsed_seconds = interval.elapsed().as_secs_f64(); let mut throughput = 0.0; - if elapsed_seconds != 0 && total_gas_used != 0 { - let as_gigas = (total_gas_used as f64).div(10_f64.powf(9_f64)); - throughput = (as_gigas) / (elapsed_seconds as f64); + if elapsed_seconds > 0.0 && total_gas_used != 0 { + let as_gigas = (total_gas_used as f64) / 1e9; + throughput = as_gigas / elapsed_seconds; } metrics!( From 385642da596af1fa228582581c183b4bb9f0a062 Mon Sep 17 00:00:00 2001 From: MrAzteca Date: Tue, 1 Jul 2025 13:14:17 +0200 Subject: [PATCH 32/40] perf(levm): refactor `CacheDB` to use more efficient APIs (#3259) **Motivation** The cache db is a bunch of functions that accept a state object as an argument. 
This is confusing since those are not methods, but functions, which also do stuff that the state object already supports natively (not to mention the duplicated function). **Description** Remove the `cache.rs` file and use the state object directly. Move stuff to more relevant places to fix borrow issues. Closes #issue_number --- crates/vm/levm/src/call_frame.rs | 16 ++++++ crates/vm/levm/src/db/cache.rs | 34 ------------- crates/vm/levm/src/db/gen_db.rs | 85 +++++++++++++------------------- crates/vm/levm/src/db/mod.rs | 5 +- crates/vm/levm/src/utils.rs | 10 ++-- 5 files changed, 57 insertions(+), 93 deletions(-) delete mode 100644 crates/vm/levm/src/db/cache.rs diff --git a/crates/vm/levm/src/call_frame.rs b/crates/vm/levm/src/call_frame.rs index 45094ff162..6005b4371b 100644 --- a/crates/vm/levm/src/call_frame.rs +++ b/crates/vm/levm/src/call_frame.rs @@ -186,6 +186,22 @@ pub struct CallFrameBackup { } impl CallFrameBackup { + pub fn backup_account_info( + &mut self, + address: Address, + account: &Account, + ) -> Result<(), InternalError> { + self.original_accounts_info + .entry(address) + .or_insert_with(|| Account { + info: account.info.clone(), + code: account.code.clone(), + storage: HashMap::new(), + }); + + Ok(()) + } + pub fn clear(&mut self) { self.original_accounts_info.clear(); self.original_account_storage_slots.clear(); diff --git a/crates/vm/levm/src/db/cache.rs b/crates/vm/levm/src/db/cache.rs deleted file mode 100644 index 09019bfd47..0000000000 --- a/crates/vm/levm/src/db/cache.rs +++ /dev/null @@ -1,34 +0,0 @@ -use ethrex_common::{Address, types::Account}; -use std::collections::HashMap; - -pub type CacheDB = HashMap; - -pub fn account_is_cached(cached_accounts: &CacheDB, address: &Address) -> bool { - cached_accounts.contains_key(address) -} - -pub fn get_account<'cache>( - cached_accounts: &'cache CacheDB, - address: &Address, -) -> Option<&'cache Account> { - cached_accounts.get(address) -} - -pub fn get_account_mut<'cache>( - cached_accounts: &'cache mut CacheDB, - address: &Address, -) -> Option<&'cache mut Account> { - cached_accounts.get_mut(address) -} - -pub fn insert_account( - cached_accounts: &mut CacheDB, - address: Address, - account: Account, -) -> Option { - cached_accounts.insert(address, account) -} - -pub fn is_account_cached(cached_accounts: &CacheDB, address: &Address) -> bool { - cached_accounts.contains_key(address) -} diff --git a/crates/vm/levm/src/db/gen_db.rs b/crates/vm/levm/src/db/gen_db.rs index 52fef14972..f049b2720f 100644 --- a/crates/vm/levm/src/db/gen_db.rs +++ b/crates/vm/levm/src/db/gen_db.rs @@ -1,5 +1,4 @@ use std::collections::HashMap; -use std::collections::HashSet; use std::sync::Arc; use bytes::Bytes; @@ -17,7 +16,8 @@ use crate::vm::VM; use super::CacheDB; use super::Database; -use super::cache; +use std::collections::HashSet; +use std::collections::hash_map::Entry; #[derive(Clone)] pub struct GeneralizedDatabase { @@ -46,11 +46,13 @@ impl GeneralizedDatabase { /// Gets account, first checking the cache and then the database /// (caching in the second case) pub fn get_account(&mut self, address: Address) -> Result<&Account, InternalError> { - if !cache::account_is_cached(&self.current_accounts_state, &address) { + if !self.current_accounts_state.contains_key(&address) { let account = self.get_account_from_database(address)?; - cache::insert_account(&mut self.current_accounts_state, address, account); + self.current_accounts_state.insert(address, account); } - cache::get_account(&self.current_accounts_state, &address) + + 
self.current_accounts_state + .get(&address) .ok_or(InternalError::AccountNotFound) } @@ -140,17 +142,25 @@ impl<'a> VM<'a> { */ pub fn get_account_mut(&mut self, address: Address) -> Result<&mut Account, InternalError> { - if cache::is_account_cached(&self.db.current_accounts_state, &address) { - self.backup_account_info(address)?; - cache::get_account_mut(&mut self.db.current_accounts_state, &address) - .ok_or(InternalError::AccountNotFound) - } else { - let acc = self.db.get_account_from_database(address)?; - cache::insert_account(&mut self.db.current_accounts_state, address, acc); - self.backup_account_info(address)?; - cache::get_account_mut(&mut self.db.current_accounts_state, &address) - .ok_or(InternalError::AccountNotFound) - } + let account = match self.db.current_accounts_state.entry(address) { + Entry::Occupied(entry) => entry.into_mut(), + Entry::Vacant(entry) => { + let account = self.db.store.get_account(address)?; + self.db + .initial_accounts_state + .insert(address, account.clone()); + + entry.insert(account) + } + }; + + self.call_frames + .last_mut() + .ok_or(InternalError::CallFrame)? + .call_frame_backup + .backup_account_info(address, account)?; + + Ok(account) } pub fn increase_account_balance( @@ -220,9 +230,13 @@ impl<'a> VM<'a> { address: Address, account: Account, ) -> Result<(), InternalError> { - self.backup_account_info(address)?; - let _ = cache::insert_account(&mut self.db.current_accounts_state, address, account); + self.call_frames + .last_mut() + .ok_or(InternalError::CallFrame)? + .call_frame_backup + .backup_account_info(address, &account)?; + self.db.current_accounts_state.insert(address, account); Ok(()) } @@ -270,7 +284,7 @@ impl<'a> VM<'a> { address: Address, key: H256, ) -> Result { - if let Some(account) = cache::get_account(&self.db.current_accounts_state, &address) { + if let Some(account) = self.db.current_accounts_state.get(&address) { if let Some(value) = account.storage.get(&key) { return Ok(*value); } @@ -320,37 +334,4 @@ impl<'a> VM<'a> { Ok(()) } - - pub fn backup_account_info(&mut self, address: Address) -> Result<(), InternalError> { - if self.call_frames.is_empty() { - return Ok(()); - } - - let is_not_backed_up = !self - .current_call_frame_mut()? - .call_frame_backup - .original_accounts_info - .contains_key(&address); - - if is_not_backed_up { - let account = cache::get_account(&self.db.current_accounts_state, &address) - .ok_or(InternalError::AccountNotFound)?; - let info = account.info.clone(); - let code = account.code.clone(); - - self.current_call_frame_mut()? 
- .call_frame_backup - .original_accounts_info - .insert( - address, - Account { - info, - code, - storage: HashMap::new(), - }, - ); - } - - Ok(()) - } } diff --git a/crates/vm/levm/src/db/mod.rs b/crates/vm/levm/src/db/mod.rs index f2632b5564..94d4735432 100644 --- a/crates/vm/levm/src/db/mod.rs +++ b/crates/vm/levm/src/db/mod.rs @@ -1,14 +1,15 @@ use crate::errors::DatabaseError; use bytes::Bytes; -pub use cache::CacheDB; use ethrex_common::{ Address, H256, U256, types::{Account, ChainConfig}, }; +use std::collections::HashMap; -pub mod cache; pub mod gen_db; +pub type CacheDB = HashMap; + pub trait Database: Send + Sync { fn get_account(&self, address: Address) -> Result; fn get_storage_value(&self, address: Address, key: H256) -> Result; diff --git a/crates/vm/levm/src/utils.rs b/crates/vm/levm/src/utils.rs index b659bda7c5..67122c5318 100644 --- a/crates/vm/levm/src/utils.rs +++ b/crates/vm/levm/src/utils.rs @@ -2,7 +2,7 @@ use crate::{ EVMConfig, call_frame::CallFrameBackup, constants::*, - db::{cache, gen_db::GeneralizedDatabase}, + db::gen_db::GeneralizedDatabase, errors::{ExceptionalHalt, InternalError, TxValidationError, VMError}, gas_cost::{ self, ACCESS_LIST_ADDRESS_COST, ACCESS_LIST_STORAGE_KEY_COST, BLOB_GAS_PER_BLOB, @@ -127,9 +127,7 @@ pub fn restore_cache_state( callframe_backup: CallFrameBackup, ) -> Result<(), VMError> { for (address, account) in callframe_backup.original_accounts_info { - if let Some(current_account) = - cache::get_account_mut(&mut db.current_accounts_state, &address) - { + if let Some(current_account) = db.current_accounts_state.get_mut(&address) { current_account.info = account.info; current_account.code = account.code; } @@ -139,7 +137,9 @@ pub fn restore_cache_state( // This call to `get_account_mut` should never return None, because we are looking up accounts // that had their storage modified, which means they should be in the cache. That's why // we return an internal error in case we haven't found it. - let account = cache::get_account_mut(&mut db.current_accounts_state, &address) + let account = db + .current_accounts_state + .get_mut(&address) .ok_or(InternalError::AccountNotFound)?; for (key, value) in storage { From b8fea06064f267fa5bac0c09f565895f2beb2c43 Mon Sep 17 00:00:00 2001 From: Edgar Date: Tue, 1 Jul 2025 15:47:40 +0200 Subject: [PATCH 33/40] perf(levm): add fib recursive bench (#3391) **Motivation** The fibonacci recursive can show perfomance results of stack reuse that the factorial recursive one can't because factorial will never be able to "reuse" the stack. 
See also https://github.com/lambdaclass/ethrex/pull/3386 **Description** Closes #issue_number --- .gitignore | 2 ++ crates/vm/levm/Makefile | 4 ++++ .../contracts/FibonacciRecursive.sol | 18 ++++++++++++++++++ 3 files changed, 24 insertions(+) create mode 100644 crates/vm/levm/bench/revm_comparison/contracts/FibonacciRecursive.sol diff --git a/.gitignore b/.gitignore index 339f72cf75..0ac0aa286e 100644 --- a/.gitignore +++ b/.gitignore @@ -71,3 +71,5 @@ book/ # mdbook-mermaid artifacts mermaid-init.js mermaid.min.js + +bench-results/ diff --git a/crates/vm/levm/Makefile b/crates/vm/levm/Makefile index ce02d5a510..55717068b9 100644 --- a/crates/vm/levm/Makefile +++ b/crates/vm/levm/Makefile @@ -30,6 +30,7 @@ REPETITIONS_SLOW := 200 BENCH_FACT_ITERATIONS := 57 BENCH_FACT_REC_ITERATIONS := 57 BENCH_FIB_ITERATIONS := 57 +BENCH_FIB_REC_ITERATIONS := 15 BENCH_HASHES_ITERATIONS := 57 BENCH_BUBBLESORT_ITERATIONS := 100 # Size of the array to sort BENCH_MINT_ITERATIONS := 500 @@ -76,6 +77,7 @@ compile-contracts: revm-comparison: compile-contracts ## 📊 Run benchmarks of fibonacci and factorial for both REVM and LEVM $(MAKE) build-revm-comparison $(call run_benchmark,Fibonacci,REPETITIONS,BENCH_FIB_ITERATIONS) + $(call run_benchmark,FibonacciRecursive,REPETITIONS_SLOW,BENCH_FIB_REC_ITERATIONS) $(call run_benchmark,Factorial,REPETITIONS,BENCH_FACT_ITERATIONS) $(call run_benchmark,FactorialRecursive,REPETITIONS,BENCH_FACT_REC_ITERATIONS) $(call run_benchmark,Push,REPETITIONS,BENCH_PUSH_ITERATIONS) @@ -88,6 +90,7 @@ revm-comparison: compile-contracts ## 📊 Run benchmarks of fibonacci and facto revm-comparison-ci: compile-contracts mkdir -p ../../../benchmark_comparison_results $(call run_benchmark_ci,Fibonacci,REPETITIONS,BENCH_FIB_ITERATIONS) + $(call run_benchmark_ci,FibonacciRecursive,REPETITIONS_SLOW,BENCH_FIB_REC_ITERATIONS) $(call run_benchmark_ci,Factorial,REPETITIONS,BENCH_FACT_ITERATIONS) $(call run_benchmark_ci,FactorialRecursive,REPETITIONS,BENCH_FACT_ITERATIONS) $(call run_benchmark_ci,Push,REPETITIONS,BENCH_PUSH_ITERATIONS) @@ -102,6 +105,7 @@ render-benches: flamegraph-levm-benchmark: $(MAKE) flamegraph-levm-bench-generic name=Fibonacci repetitions=$(REPETITIONS) iterations=$(BENCH_FIB_ITERATIONS) + $(MAKE) flamegraph-levm-bench-generic name=FibonacciRecursive repetitions=$(REPETITIONS_SLOW) iterations=$(BENCH_FIB_REC_ITERATIONS) $(MAKE) flamegraph-levm-bench-generic name=Factorial repetitions=$(REPETITIONS) iterations=$(BENCH_FACT_ITERATIONS) $(MAKE) flamegraph-levm-bench-generic name=FactorialRecursive repetitions=$(REPETITIONS) iterations=$(BENCH_FACT_REC_ITERATIONS) $(MAKE) flamegraph-levm-bench-generic name=ManyHashes repetitions=$(REPETITIONS_SLOW) iterations=$(BENCH_HASHES_ITERATIONS) diff --git a/crates/vm/levm/bench/revm_comparison/contracts/FibonacciRecursive.sol b/crates/vm/levm/bench/revm_comparison/contracts/FibonacciRecursive.sol new file mode 100644 index 0000000000..bcd5c728f0 --- /dev/null +++ b/crates/vm/levm/bench/revm_comparison/contracts/FibonacciRecursive.sol @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.17; + +contract FibonacciRecursive { + function Benchmark(uint256 n) public view returns (uint256 result) { + if (n <= 1) return n; + + uint256 rec = this.Benchmark(n - 1) + this.Benchmark(n - 2); + + // Check for overflow + if (rec > (type(uint256).max / n)) { + return type(uint256).max; + } + + result = rec; + return result; + } +} From 0fbe568826efe286755b2d56e5f873aa0112d73f Mon Sep 17 00:00:00 2001 From: LeanSerra 
<46695152+LeanSerra@users.noreply.github.com> Date: Tue, 1 Jul 2025 12:16:50 -0300 Subject: [PATCH 34/40] feat(l2): replace custom merkle tree with `OpenZeppelin` + `lambdaworks` (#3344) **Motivation** We are using an unsafe (not audited) custom merkle tree implementation for the L1messaging system **Description** - Replace the merkle tree verify function in the CommonBridge contract to use OppenZeppelin's `MerkleProof.sol` contract - Replace our custom merkle tree implementation with lambdaworks' for this: - We implement the trait `IsMerkleTreeBackend` for H256 to build a tree that is compliant with - https://docs.openzeppelin.com/contracts/5.x/api/utils#MerkleProof - The implementation is taken from - https://github.com/yetanotherco/aligned_layer/blob/8a3a6448c974d09c645f3b74d4c9ff9d2dd27249/batcher/aligned-sdk/src/aggregation_layer/types.rs --- Cargo.lock | 16 +++- Cargo.toml | 1 + crates/l2/common/Cargo.toml | 2 + crates/l2/common/src/l1_messages.rs | 53 +++----------- crates/l2/common/src/lib.rs | 1 + crates/l2/common/src/merkle_tree.rs | 69 ++++++++++++++++++ crates/l2/common/src/state_diff.rs | 2 + crates/l2/contracts/src/l1/CommonBridge.sol | 67 +++++++++-------- .../src/l1/interfaces/ICommonBridge.sol | 6 +- crates/l2/contracts/src/l2/CommonBridgeL2.sol | 67 +++++++++++------ .../l2/contracts/src/l2/L2ToL1Messenger.sol | 7 +- .../src/l2/interfaces/IL2ToL1Messenger.sol | 3 +- crates/l2/networking/rpc/l2/l1_message.rs | 30 ++++++-- crates/l2/networking/rpc/utils.rs | 28 ------- .../l2/prover/zkvm/interface/sp1/Cargo.lock | 14 ++++ .../l2/prover/zkvm/interface/src/execution.rs | 9 +-- crates/l2/sdk/src/merkle_tree.rs | 73 ------------------- crates/l2/sdk/src/sdk.rs | 5 +- crates/l2/sequencer/errors.rs | 6 -- crates/l2/sequencer/l1_committer.rs | 5 +- crates/l2/tee/quote-gen/Cargo.lock | 16 +++- crates/networking/rpc/clients/eth/mod.rs | 2 +- fixtures/genesis/l2.json | 6 +- 23 files changed, 254 insertions(+), 234 deletions(-) create mode 100644 crates/l2/common/src/merkle_tree.rs delete mode 100644 crates/l2/sdk/src/merkle_tree.rs diff --git a/Cargo.lock b/Cargo.lock index 8e6b2ab8c3..4099fc08e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -121,7 +121,7 @@ dependencies = [ "ethers", "futures-util", "hex", - "lambdaworks-crypto", + "lambdaworks-crypto 0.12.0", "log", "reqwest 0.12.20", "serde", @@ -4005,8 +4005,10 @@ dependencies = [ "ethrex-trie", "ethrex-vm", "keccak-hash", + "lambdaworks-crypto 0.11.0", "lazy_static", "serde", + "sha3", "thiserror 2.0.12", ] @@ -6136,6 +6138,18 @@ dependencies = [ "regex-automata 0.4.9", ] +[[package]] +name = "lambdaworks-crypto" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ec4b462bbec171e1af821f3d9fff72e17de93b3d1022f29aa70fec8262c1cee" +dependencies = [ + "lambdaworks-math 0.11.0", + "serde", + "sha2", + "sha3", +] + [[package]] name = "lambdaworks-crypto" version = "0.12.0" diff --git a/Cargo.toml b/Cargo.toml index 3838419b73..05e7d993ab 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -98,6 +98,7 @@ futures = "0.3.31" # When updating it try to build the TDX image and update service.nix with the new hash spawned-concurrency = {git = "https://github.com/lambdaclass/spawned.git", tag = "v0.1.2-alpha"} spawned-rt = {git = "https://github.com/lambdaclass/spawned.git", tag = "v0.1.2-alpha"} +lambdaworks-crypto = "0.11.0" [patch.crates-io] secp256k1 = { git = "https://github.com/sp1-patches/rust-secp256k1", tag = "patch-0.29.1-sp1-5.0.0" } diff --git a/crates/l2/common/Cargo.toml 
b/crates/l2/common/Cargo.toml index 6d3c4075b3..1b5a2c4fe9 100644 --- a/crates/l2/common/Cargo.toml +++ b/crates/l2/common/Cargo.toml @@ -15,6 +15,8 @@ thiserror.workspace = true keccak-hash.workspace = true serde.workspace = true lazy_static.workspace = true +lambdaworks-crypto.workspace = true +sha3.workspace = true [lints.clippy] unwrap_used = "deny" diff --git a/crates/l2/common/src/l1_messages.rs b/crates/l2/common/src/l1_messages.rs index 8e92da6484..3f10765d9d 100644 --- a/crates/l2/common/src/l1_messages.rs +++ b/crates/l2/common/src/l1_messages.rs @@ -2,13 +2,12 @@ use std::sync::LazyLock; use ethereum_types::{Address, H256}; use ethrex_common::{ - H160, + H160, U256, types::{Receipt, Transaction}, }; use keccak_hash::keccak; use serde::{Deserialize, Serialize}; -use thiserror::Error; pub const L1MESSENGER_ADDRESS: Address = H160([ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -24,6 +23,8 @@ pub struct L1Message { pub from: Address, /// Hash of the data given to the L1Messenger pub data_hash: H256, + /// Message id emitted by the bridge contract + pub message_id: U256, } impl L1Message { @@ -32,33 +33,25 @@ impl L1Message { bytes.extend_from_slice(&self.tx_hash.0); bytes.extend_from_slice(&self.from.to_fixed_bytes()); bytes.extend_from_slice(&self.data_hash.0); + bytes.extend_from_slice(&self.message_id.to_big_endian()); bytes } } -#[derive(Debug, Error)] -pub enum L1MessagingError { - #[error("Failed to merkelize messages")] - FailedToMerkelize, -} - pub fn get_l1_message_hash(msg: &L1Message) -> H256 { keccak(msg.encode()) } -pub fn get_block_l1_message_hashes( - txs: &[Transaction], - receipts: &[Receipt], -) -> Result, L1MessagingError> { - Ok(get_block_l1_messages(txs, receipts) +pub fn get_block_l1_message_hashes(txs: &[Transaction], receipts: &[Receipt]) -> Vec { + get_block_l1_messages(txs, receipts) .iter() .map(get_l1_message_hash) - .collect()) + .collect() } pub fn get_block_l1_messages(txs: &[Transaction], receipts: &[Receipt]) -> Vec { static L1MESSAGE_EVENT_SELECTOR: LazyLock = - LazyLock::new(|| keccak("L1Message(address,bytes32)".as_bytes())); + LazyLock::new(|| keccak("L1Message(address,bytes32,uint256)".as_bytes())); receipts .iter() @@ -75,38 +68,10 @@ pub fn get_block_l1_messages(txs: &[Transaction], receipts: &[Receipt]) -> Vec Result { - if !hashes.is_empty() { - merkelize(hashes) - } else { - Ok(H256::zero()) - } -} - -pub fn merkelize(data: &[H256]) -> Result { - let mut data = data.to_vec(); - let mut first = true; - while data.len() > 1 || first { - first = false; - data = data - .chunks(2) - .flat_map(|chunk| -> Result { - let left = chunk.first().ok_or(L1MessagingError::FailedToMerkelize)?; - let right = *chunk.get(1).unwrap_or(left); - Ok(keccak([left.as_bytes(), right.as_bytes()].concat()) - .as_fixed_bytes() - .into()) - }) - .collect(); - } - data.first() - .copied() - .ok_or(L1MessagingError::FailedToMerkelize) -} diff --git a/crates/l2/common/src/lib.rs b/crates/l2/common/src/lib.rs index c6c5a9cb17..876f3f802b 100644 --- a/crates/l2/common/src/lib.rs +++ b/crates/l2/common/src/lib.rs @@ -1,5 +1,6 @@ pub mod calldata; pub mod deposits; pub mod l1_messages; +pub mod merkle_tree; pub mod prover; pub mod state_diff; diff --git a/crates/l2/common/src/merkle_tree.rs b/crates/l2/common/src/merkle_tree.rs new file mode 100644 index 0000000000..a84a56c31f --- /dev/null +++ b/crates/l2/common/src/merkle_tree.rs @@ -0,0 +1,69 @@ +use keccak_hash::H256; +use 
lambdaworks_crypto::merkle_tree::{merkle::MerkleTree, traits::IsMerkleTreeBackend}; +use sha3::{Digest, Keccak256}; + +// We use a newtype wrapper around `H256` because Rust's orphan rule +// prevents implementing a foreign trait (`IsMerkleTreeBackend`) for a foreign type (`H256`). +#[derive(Default, Debug, PartialEq, Eq)] +struct TreeData(pub H256); + +// Code from https://github.com/yetanotherco/aligned_layer/blob/8a3a6448c974d09c645f3b74d4c9ff9d2dd27249/batcher/aligned-sdk/src/aggregation_layer/types.rs to build a merkle tree with commutative Keccak256 hashes +impl IsMerkleTreeBackend for TreeData { + type Data = TreeData; + type Node = [u8; 32]; + + /// We don't have to hash the data, as its already hashed + fn hash_data(leaf: &Self::Data) -> Self::Node { + leaf.0.to_fixed_bytes() + } + + /// Computes a commutative Keccak256 hash, ensuring H(a, b) == H(b, a). + /// + /// See: https://docs.openzeppelin.com/contracts/5.x/api/utils#Hashes + /// + /// Source: https://github.com/OpenZeppelin/openzeppelin-contracts/blob/1a87de932664d9b905612f4d9d1655fd27a41722/contracts/utils/cryptography/Hashes.sol#L17-L19 + /// + /// Compliant with OpenZeppelin's `verify` function from MerkleProof.sol. + /// + /// See: https://docs.openzeppelin.com/contracts/5.x/api/utils#MerkleProof + /// + /// Source: https://github.com/OpenZeppelin/openzeppelin-contracts/blob/1a87de932664d9b905612f4d9d1655fd27a41722/contracts/utils/cryptography/MerkleProof.sol#L114-L128 + fn hash_new_parent(child_1: &Self::Node, child_2: &Self::Node) -> Self::Node { + let mut hasher = Keccak256::new(); + if child_1 < child_2 { + hasher.update(child_1); + hasher.update(child_2); + } else { + hasher.update(child_2); + hasher.update(child_1); + } + hasher.finalize().into() + } +} + +pub fn compute_merkle_root(hashes: &[H256]) -> H256 { + let hashes = hashes + .iter() + .map(|hash| TreeData(*hash)) + .collect::>(); + // Merkle tree build only returns None when hashes is empty + let Some(tree) = MerkleTree::::build(&hashes) else { + return H256::zero(); + }; + H256::from(tree.root) +} + +pub fn compute_merkle_proof(hashes: &[H256], index: usize) -> Option> { + let hashes = hashes + .iter() + .map(|hash| TreeData(*hash)) + .collect::>(); + Some( + MerkleTree::::build(&hashes)? + .get_proof_by_pos(index)? 
+ .merkle_path + .iter() + .map(H256::from) + .collect(), + ) +} diff --git a/crates/l2/common/src/state_diff.rs b/crates/l2/common/src/state_diff.rs index d5f1ebfbf9..e4ecc6e645 100644 --- a/crates/l2/common/src/state_diff.rs +++ b/crates/l2/common/src/state_diff.rs @@ -228,11 +228,13 @@ impl StateDiff { let tx = decoder.get_h256()?; let from = decoder.get_address()?; let data = decoder.get_h256()?; + let index = decoder.get_u256()?; l1messages.push(L1Message { from, data_hash: data, tx_hash: tx, + message_id: index, }); } diff --git a/crates/l2/contracts/src/l1/CommonBridge.sol b/crates/l2/contracts/src/l1/CommonBridge.sol index f04266f3d2..97fea19d94 100644 --- a/crates/l2/contracts/src/l1/CommonBridge.sol +++ b/crates/l2/contracts/src/l1/CommonBridge.sol @@ -7,6 +7,8 @@ import "@openzeppelin/contracts-upgradeable/access/Ownable2StepUpgradeable.sol"; import "@openzeppelin/contracts-upgradeable/utils/ReentrancyGuardUpgradeable.sol"; import "@openzeppelin/contracts/token/ERC20/IERC20.sol"; import "@openzeppelin/contracts/token/ERC20/utils/SafeERC20.sol"; +import {MerkleProof} from "@openzeppelin/contracts/utils/cryptography/MerkleProof.sol"; + import "./interfaces/ICommonBridge.sol"; import "./interfaces/IOnChainProposer.sol"; import "../l2/interfaces/ICommonBridgeL2.sol"; @@ -27,7 +29,7 @@ contract CommonBridge is /// of the L2 transaction that requested the withdrawal. /// @dev The key is the hash of the L2 transaction that requested the /// withdrawal. - /// @dev The value is a boolean indicating if the withdrawal was claimed or not. + /// @dev Deprecated. mapping(bytes32 => bool) public claimedWithdrawals; /// @notice Mapping of merkle roots to the L2 withdrawal transaction logs. @@ -64,6 +66,13 @@ contract CommonBridge is address public constant ETH_TOKEN = 0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE; + /// @notice Mapping of unclaimed withdrawals. A withdrawal is claimed if + /// there is a non-zero value in the mapping for the message id + /// of the L2 transaction that requested the withdrawal. + /// @dev The key is the message id of the L1Message of the transaction. + /// @dev The value is a boolean indicating if the withdrawal was claimed or not. 
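Editorial note: a proof returned by `compute_merkle_proof` above is checked with the same commutative fold that OpenZeppelin's `MerkleProof.verify` performs on-chain. A minimal off-chain sketch of that check; the `hash_pair` and `verify_proof` helpers are illustrative and simply mirror `hash_new_parent`:

```rust
use keccak_hash::H256;
use sha3::{Digest, Keccak256};

// Commutative pair hash, mirroring `hash_new_parent` in merkle_tree.rs:
// the smaller child is always hashed first, so H(a, b) == H(b, a).
fn hash_pair(a: H256, b: H256) -> H256 {
    let (first, second) = if a.as_bytes() <= b.as_bytes() { (a, b) } else { (b, a) };
    let mut hasher = Keccak256::new();
    hasher.update(first.as_bytes());
    hasher.update(second.as_bytes());
    H256(hasher.finalize().into())
}

// Fold the sibling path from the leaf up; the result must equal the root
// returned by `compute_merkle_root` for the same set of leaf hashes.
fn verify_proof(root: H256, leaf: H256, proof: &[H256]) -> bool {
    proof.iter().fold(leaf, |acc, sibling| hash_pair(acc, *sibling)) == root
}
```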
+ mapping(uint256 => bool) public claimedWithdrawalIDs; + modifier onlyOnChainProposer() { require( msg.sender == ON_CHAIN_PROPOSER, @@ -129,7 +138,7 @@ contract CommonBridge is function sendToL2(SendValues calldata sendValues) public { _sendToL2(msg.sender, sendValues); } - + /// @inheritdoc ICommonBridge function deposit(address l2Recipient) public payable { _deposit(l2Recipient); @@ -137,7 +146,10 @@ contract CommonBridge is function _deposit(address l2Recipient) private { deposits[ETH_TOKEN][ETH_TOKEN] += msg.value; - bytes memory callData = abi.encodeCall(ICommonBridgeL2.mintETH, (l2Recipient)); + bytes memory callData = abi.encodeCall( + ICommonBridgeL2.mintETH, + (l2Recipient) + ); SendValues memory sendValues = SendValues({ to: L2_BRIDGE_ADDRESS, gasLimit: 21000 * 5, @@ -243,7 +255,7 @@ contract CommonBridge is bytes32 l2WithdrawalTxHash, uint256 claimedAmount, uint256 withdrawalBatchNumber, - uint256 withdrawalLogIndex, + uint256 withdrawalMessageId, bytes32[] calldata withdrawalProof ) public { _claimWithdrawal( @@ -252,7 +264,7 @@ contract CommonBridge is ETH_TOKEN, claimedAmount, withdrawalBatchNumber, - withdrawalLogIndex, + withdrawalMessageId, withdrawalProof ); (bool success, ) = payable(msg.sender).call{value: claimedAmount}(""); @@ -266,7 +278,7 @@ contract CommonBridge is address tokenL2, uint256 claimedAmount, uint256 withdrawalBatchNumber, - uint256 withdrawalLogIndex, + uint256 withdrawalMessageId, bytes32[] calldata withdrawalProof ) public nonReentrant { _claimWithdrawal( @@ -275,7 +287,7 @@ contract CommonBridge is tokenL2, claimedAmount, withdrawalBatchNumber, - withdrawalLogIndex, + withdrawalMessageId, withdrawalProof ); require( @@ -291,7 +303,7 @@ contract CommonBridge is address tokenL2, uint256 claimedAmount, uint256 withdrawalBatchNumber, - uint256 withdrawalLogIndex, + uint256 withdrawalMessageId, bytes32[] calldata withdrawalProof ) private { require( @@ -302,9 +314,6 @@ contract CommonBridge is bytes32 msgHash = keccak256( abi.encodePacked(tokenL1, tokenL2, msg.sender, claimedAmount) ); - bytes32 withdrawalId = keccak256( - abi.encodePacked(withdrawalBatchNumber, withdrawalLogIndex) - ); require( batchWithdrawalLogsMerkleRoots[withdrawalBatchNumber] != bytes32(0), "CommonBridge: the batch that emitted the withdrawal logs was not committed" @@ -315,17 +324,17 @@ contract CommonBridge is "CommonBridge: the batch that emitted the withdrawal logs was not verified" ); require( - claimedWithdrawals[withdrawalId] == false, + claimedWithdrawalIDs[withdrawalMessageId] == false, "CommonBridge: the withdrawal was already claimed" ); - claimedWithdrawals[withdrawalId] = true; - emit WithdrawalClaimed(withdrawalId); + claimedWithdrawalIDs[withdrawalMessageId] = true; + emit WithdrawalClaimed(withdrawalMessageId); require( _verifyMessageProof( l2WithdrawalTxHash, msgHash, withdrawalBatchNumber, - withdrawalLogIndex, + withdrawalMessageId, withdrawalProof ), "CommonBridge: Invalid proof" @@ -336,27 +345,23 @@ contract CommonBridge is bytes32 l2WithdrawalTxHash, bytes32 msgHash, uint256 withdrawalBatchNumber, - uint256 withdrawalLogIndex, + uint256 withdrawalMessageId, bytes32[] calldata withdrawalProof ) internal view returns (bool) { bytes32 withdrawalLeaf = keccak256( - abi.encodePacked(l2WithdrawalTxHash, L2_BRIDGE_ADDRESS, msgHash) + abi.encodePacked( + l2WithdrawalTxHash, + L2_BRIDGE_ADDRESS, + msgHash, + withdrawalMessageId + ) ); - for (uint256 i = 0; i < withdrawalProof.length; i++) { - if (withdrawalLogIndex % 2 == 0) { - withdrawalLeaf = keccak256( - 
abi.encodePacked(withdrawalLeaf, withdrawalProof[i]) - ); - } else { - withdrawalLeaf = keccak256( - abi.encodePacked(withdrawalProof[i], withdrawalLeaf) - ); - } - withdrawalLogIndex /= 2; - } return - withdrawalLeaf == - batchWithdrawalLogsMerkleRoots[withdrawalBatchNumber]; + MerkleProof.verify( + withdrawalProof, + batchWithdrawalLogsMerkleRoots[withdrawalBatchNumber], + withdrawalLeaf + ); } /// @notice Allow owner to upgrade the contract. diff --git a/crates/l2/contracts/src/l1/interfaces/ICommonBridge.sol b/crates/l2/contracts/src/l1/interfaces/ICommonBridge.sol index 86f5db0de9..0e01e121af 100644 --- a/crates/l2/contracts/src/l1/interfaces/ICommonBridge.sol +++ b/crates/l2/contracts/src/l1/interfaces/ICommonBridge.sol @@ -39,10 +39,8 @@ interface ICommonBridge { /// @notice A withdrawal has been claimed. /// @dev Event emitted when a withdrawal is claimed. - /// @param withdrawalId the hash of the batch and index of the withdrawal - event WithdrawalClaimed( - bytes32 indexed withdrawalId - ); + /// @param withdrawalId the message Id of the claimed withdrawal + event WithdrawalClaimed(uint256 indexed withdrawalId); struct SendValues { address to; diff --git a/crates/l2/contracts/src/l2/CommonBridgeL2.sol b/crates/l2/contracts/src/l2/CommonBridgeL2.sol index e77cba5fdd..671cb47d4d 100644 --- a/crates/l2/contracts/src/l2/CommonBridgeL2.sol +++ b/crates/l2/contracts/src/l2/CommonBridgeL2.sol @@ -8,16 +8,20 @@ import "./interfaces/IERC20L2.sol"; /// @title CommonBridge L2 contract. /// @author LambdaClass contract CommonBridgeL2 is ICommonBridgeL2 { - address public constant L1_MESSENGER = + address public constant L1_MESSENGER = 0x000000000000000000000000000000000000FFFE; address public constant BURN_ADDRESS = 0x0000000000000000000000000000000000000000; /// @notice Token address used to represent ETH - address public constant ETH_TOKEN = 0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE; + address public constant ETH_TOKEN = + 0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE; // Some calls come as a privileged transaction, whose sender is the bridge itself. 
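As an aside on the `_verifyMessageProof` change above: replacing the manual index-based pairing loop with OpenZeppelin's `MerkleProof.verify` works because the L2 side (the `merkle_tree` module earlier in this patch) builds the tree with a commutative Keccak256 parent hash, so the verifier no longer needs to know whether each sibling sits on the left or the right. The following is a minimal, illustrative Rust sketch of that verification logic, not code from this patch; the name `verify_commutative_proof` is hypothetical and it only assumes the `sha3` crate already used above.

```rust
use sha3::{Digest, Keccak256};

/// Recomputes the root from a leaf and its sibling path using the commutative
/// Keccak256 parent hash (H(a, b) == H(b, a)) and compares it with the expected
/// root, mirroring what OpenZeppelin's `MerkleProof.verify` does on L1.
fn verify_commutative_proof(proof: &[[u8; 32]], root: [u8; 32], leaf: [u8; 32]) -> bool {
    let mut node = leaf;
    for sibling in proof {
        // Hash the smaller node first so the order of the pair does not matter.
        let (first, second) = if node < *sibling {
            (node, *sibling)
        } else {
            (*sibling, node)
        };
        let mut hasher = Keccak256::new();
        hasher.update(first);
        hasher.update(second);
        node = hasher.finalize().into();
    }
    node == root
}
```

Under this scheme, a path returned by `compute_merkle_proof` above should recompute the same root that `compute_merkle_root` produces for the full set of message hashes.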
modifier onlySelf() { - require(msg.sender == address(this), "CommonBridgeL2: caller is not the bridge"); + require( + msg.sender == address(this), + "CommonBridgeL2: caller is not the bridge" + ); _; } @@ -29,14 +33,13 @@ contract CommonBridgeL2 is ICommonBridgeL2 { emit WithdrawalInitiated(msg.sender, _receiverOnL1, msg.value); - IL2ToL1Messenger(L1_MESSENGER).sendMessageToL1(keccak256(abi.encodePacked( - ETH_TOKEN, - ETH_TOKEN, - _receiverOnL1, - msg.value - ))); + IL2ToL1Messenger(L1_MESSENGER).sendMessageToL1( + keccak256( + abi.encodePacked(ETH_TOKEN, ETH_TOKEN, _receiverOnL1, msg.value) + ) + ); } - + function mintETH(address to) external payable { (bool success, ) = to.call{value: msg.value}(""); if (!success) { @@ -45,33 +48,55 @@ contract CommonBridgeL2 is ICommonBridgeL2 { emit DepositProcessed(to, msg.value); } - function mintERC20(address tokenL1, address tokenL2, address destination, uint256 amount) external onlySelf { - (bool success, ) = address(this).call(abi.encodeCall(this.tryMintERC20, (tokenL1, tokenL2, destination, amount))); + function mintERC20( + address tokenL1, + address tokenL2, + address destination, + uint256 amount + ) external onlySelf { + (bool success, ) = address(this).call( + abi.encodeCall( + this.tryMintERC20, + (tokenL1, tokenL2, destination, amount) + ) + ); if (!success) { _withdraw(tokenL1, tokenL2, destination, amount); } emit ERC20DepositProcessed(tokenL1, tokenL2, destination, amount); } - function tryMintERC20(address tokenL1, address tokenL2, address destination, uint256 amount) external onlySelf { + function tryMintERC20( + address tokenL1, + address tokenL2, + address destination, + uint256 amount + ) external onlySelf { IERC20L2 token = IERC20L2(tokenL2); require(token.l1Address() == tokenL1); token.crosschainMint(destination, amount); } - function withdrawERC20(address tokenL1, address tokenL2, address destination, uint256 amount) external { + function withdrawERC20( + address tokenL1, + address tokenL2, + address destination, + uint256 amount + ) external { require(amount > 0, "Withdrawal amount must be positive"); IERC20L2(tokenL2).crosschainBurn(msg.sender, amount); emit ERC20WithdrawalInitiated(tokenL1, tokenL2, destination, amount); _withdraw(tokenL1, tokenL2, destination, amount); } - function _withdraw(address tokenL1, address tokenL2, address destination, uint256 amount) private { - IL2ToL1Messenger(L1_MESSENGER).sendMessageToL1(keccak256(abi.encodePacked( - tokenL1, - tokenL2, - destination, - amount - ))); + function _withdraw( + address tokenL1, + address tokenL2, + address destination, + uint256 amount + ) private { + IL2ToL1Messenger(L1_MESSENGER).sendMessageToL1( + keccak256(abi.encodePacked(tokenL1, tokenL2, destination, amount)) + ); } } diff --git a/crates/l2/contracts/src/l2/L2ToL1Messenger.sol b/crates/l2/contracts/src/l2/L2ToL1Messenger.sol index 37dd841c76..5e7f32dfae 100644 --- a/crates/l2/contracts/src/l2/L2ToL1Messenger.sol +++ b/crates/l2/contracts/src/l2/L2ToL1Messenger.sol @@ -6,9 +6,14 @@ import "./interfaces/IL2ToL1Messenger.sol"; /// @title L2ToL1Messenger contract. /// @author LambdaClass contract L2ToL1Messenger is IL2ToL1Messenger { + /// @notice Id of the last emitted message. + /// @dev Message Id that should be incremented before a message is sent + uint256 public lastMessageId; + function sendMessageToL1(bytes32 data) external { // This event gets pushed to L1, the sequencer monitors // them on every block. 
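+        // The id is incremented before the event is emitted, so message ids start
+        // at 1 and are strictly increasing; the L1 CommonBridge uses this id as the
+        // key in claimedWithdrawalIDs to prevent a withdrawal from being claimed twice.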
- emit L1Message(msg.sender, data); + lastMessageId += 1; + emit L1Message(msg.sender, data, lastMessageId); } } diff --git a/crates/l2/contracts/src/l2/interfaces/IL2ToL1Messenger.sol b/crates/l2/contracts/src/l2/interfaces/IL2ToL1Messenger.sol index 7e5b91a952..4d2d701543 100644 --- a/crates/l2/contracts/src/l2/interfaces/IL2ToL1Messenger.sol +++ b/crates/l2/contracts/src/l2/interfaces/IL2ToL1Messenger.sol @@ -12,7 +12,8 @@ interface IL2ToL1Messenger { /// @param data the data being sent, usually a hash event L1Message( address indexed senderOnL2, - bytes32 indexed data + bytes32 indexed data, + uint256 indexed messageId ); /// @notice Sends the given data to the L1 diff --git a/crates/l2/networking/rpc/l2/l1_message.rs b/crates/l2/networking/rpc/l2/l1_message.rs index b6ab36b138..ba03341647 100644 --- a/crates/l2/networking/rpc/l2/l1_message.rs +++ b/crates/l2/networking/rpc/l2/l1_message.rs @@ -1,11 +1,15 @@ -use ethrex_l2_common::l1_messages::get_block_l1_message_hashes; use keccak_hash::H256; use serde_json::Value; use tracing::info; use crate::{ rpc::{RpcApiContext, RpcHandler}, - utils::{RpcErr, merkle_proof}, + utils::RpcErr, +}; + +use ethrex_l2_common::{ + l1_messages::{get_block_l1_messages, get_l1_message_hash}, + merkle_tree::compute_merkle_proof, }; pub struct GetL1MessageProof { @@ -56,8 +60,11 @@ impl RpcHandler for GetL1MessageProof { }; // Gets the message hashes from the transaction - let tx_message_hashes = get_block_l1_message_hashes(&[tx], &[tx_receipt]) - .map_err(|e| ethrex_rpc::RpcErr::Internal(e.to_string()))?; + let tx_messages = get_block_l1_messages(&[tx], &[tx_receipt]); + let tx_message_hashes = tx_messages + .iter() + .map(get_l1_message_hash) + .collect::>(); // Gets the batch number for the block let batch_number = match context @@ -81,18 +88,25 @@ impl RpcHandler for GetL1MessageProof { let mut proofs = vec![]; for (index, message_hash) in batch_message_hashes.iter().enumerate() { - if !tx_message_hashes.contains(message_hash) { + let Some(message_idx) = tx_message_hashes + .iter() + .position(|hash| hash == message_hash) + else { + continue; + }; + + let Some(message) = tx_messages.get(message_idx) else { continue; - } + }; // Calculates the merkle proof of the batch - let Some(path) = merkle_proof(batch_message_hashes.clone(), index) else { + let Some(path) = compute_merkle_proof(&batch_message_hashes, index) else { return Ok(Value::Null); }; let proof = ethrex_rpc::clients::eth::L1MessageProof { batch_number, - index, + message_id: message.message_id, message_hash: *message_hash, merkle_proof: path, }; diff --git a/crates/l2/networking/rpc/utils.rs b/crates/l2/networking/rpc/utils.rs index 25c06efc7a..67d28534a9 100644 --- a/crates/l2/networking/rpc/utils.rs +++ b/crates/l2/networking/rpc/utils.rs @@ -1,8 +1,6 @@ -use ethrex_common::H256; use ethrex_rpc::utils::RpcErrorMetadata; use ethrex_storage::error::StoreError; use ethrex_storage_rollup::RollupStoreError; -use keccak_hash::keccak; use serde_json::Value; #[derive(Debug, thiserror::Error)] @@ -87,29 +85,3 @@ pub fn parse_json_hex(hex: &serde_json::Value) -> Result { let maybe_parsed = u64::from_str_radix(trimmed, 16); maybe_parsed.map_err(|_| format!("Could not parse given hex {maybe_hex}")) } - -pub fn merkle_proof(data: Vec, mut index: usize) -> Option> { - if index >= data.len() { - return None; - } - - let mut proof = vec![]; - let mut current = data.clone(); - let mut first = true; - while current.len() > 1 || first { - first = false; - proof.push(*current.get(index ^ 
1).or(current.get(index))?); - index /= 2; - current = current - .chunks(2) - .map(|chunk| -> H256 { - let left = *chunk.first().unwrap_or(&H256::zero()); - let right = *chunk.get(1).unwrap_or(&left); - keccak([left.as_bytes(), right.as_bytes()].concat()) - .as_fixed_bytes() - .into() - }) - .collect(); - } - Some(proof) -} diff --git a/crates/l2/prover/zkvm/interface/sp1/Cargo.lock b/crates/l2/prover/zkvm/interface/sp1/Cargo.lock index 021ee9c5c0..a4c2180d6f 100644 --- a/crates/l2/prover/zkvm/interface/sp1/Cargo.lock +++ b/crates/l2/prover/zkvm/interface/sp1/Cargo.lock @@ -1316,8 +1316,10 @@ dependencies = [ "ethrex-trie", "ethrex-vm", "keccak-hash", + "lambdaworks-crypto", "lazy_static", "serde", + "sha3", "thiserror", ] @@ -1913,6 +1915,18 @@ dependencies = [ "spin", ] +[[package]] +name = "lambdaworks-crypto" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ec4b462bbec171e1af821f3d9fff72e17de93b3d1022f29aa70fec8262c1cee" +dependencies = [ + "lambdaworks-math", + "serde", + "sha2", + "sha3", +] + [[package]] name = "lambdaworks-math" version = "0.11.0" diff --git a/crates/l2/prover/zkvm/interface/src/execution.rs b/crates/l2/prover/zkvm/interface/src/execution.rs index 4672c51672..b13fd816c6 100644 --- a/crates/l2/prover/zkvm/interface/src/execution.rs +++ b/crates/l2/prover/zkvm/interface/src/execution.rs @@ -24,7 +24,7 @@ use ethrex_common::types::{ }; use ethrex_l2_common::{ deposits::{DepositError, compute_deposit_logs_hash, get_block_deposits}, - l1_messages::{L1MessagingError, compute_merkle_root, get_block_l1_messages}, + l1_messages::get_block_l1_messages, state_diff::{StateDiff, StateDiffError, prepare_state_diff}, }; #[cfg(feature = "l2")] @@ -45,9 +45,6 @@ pub enum StatelessExecutionError { #[error("EVM error: {0}")] EvmError(#[from] EvmError), #[cfg(feature = "l2")] - #[error("L1Message calculation error: {0}")] - L1MessageError(#[from] L1MessagingError), - #[cfg(feature = "l2")] #[error("Deposit calculation error: {0}")] DepositError(#[from] DepositError), #[cfg(feature = "l2")] @@ -348,7 +345,7 @@ fn compute_l1messages_and_deposits_digests( l1messages: &[L1Message], deposits: &[PrivilegedL2Transaction], ) -> Result<(H256, H256), StatelessExecutionError> { - use ethrex_l2_common::l1_messages::get_l1_message_hash; + use ethrex_l2_common::{l1_messages::get_l1_message_hash, merkle_tree::compute_merkle_root}; let message_hashes: Vec<_> = l1messages.iter().map(get_l1_message_hash).collect(); let deposit_hashes: Vec<_> = deposits @@ -357,7 +354,7 @@ fn compute_l1messages_and_deposits_digests( .map(|hash| hash.ok_or(StatelessExecutionError::InvalidDeposit)) .collect::>()?; - let l1message_merkle_root = compute_merkle_root(&message_hashes)?; + let l1message_merkle_root = compute_merkle_root(&message_hashes); let deposit_logs_hash = compute_deposit_logs_hash(deposit_hashes).map_err(StatelessExecutionError::DepositError)?; diff --git a/crates/l2/sdk/src/merkle_tree.rs b/crates/l2/sdk/src/merkle_tree.rs deleted file mode 100644 index eec2d0351f..0000000000 --- a/crates/l2/sdk/src/merkle_tree.rs +++ /dev/null @@ -1,73 +0,0 @@ -use ethrex_common::H256; -use keccak_hash::keccak; -use serde::{Deserialize, Serialize}; -use tracing::info; - -#[derive(Debug, thiserror::Error, Clone, Serialize, Deserialize)] -pub enum MerkleError { - #[error("Left element is None")] - LeftElementIsNone(), - #[error("Data vector is empty")] - DataVectorIsEmpty(), -} - -pub fn merkelize(data: Vec) -> Result { - info!("Merkelizing {:?}", data); - let mut data = 
data; - let mut first = true; - while data.len() > 1 || first { - first = false; - data = data - .chunks(2) - .flat_map(|chunk| -> Result { - let left = chunk.first().ok_or(MerkleError::LeftElementIsNone())?; - let right = *chunk.get(1).unwrap_or(left); - Ok(keccak([left.as_bytes(), right.as_bytes()].concat()) - .as_fixed_bytes() - .into()) - }) - .collect(); - } - data.first() - .copied() - .ok_or(MerkleError::DataVectorIsEmpty()) -} - -pub fn merkle_proof(data: Vec, base_element: H256) -> Result>, MerkleError> { - if !data.contains(&base_element) { - return Ok(None); - } - - let mut proof = vec![]; - let mut data = data; - - let mut target_hash = base_element; - let mut first = true; - while data.len() > 1 || first { - first = false; - let current_target = target_hash; - data = data - .chunks(2) - .flat_map(|chunk| -> Result { - let left = chunk - .first() - .copied() - .ok_or(MerkleError::LeftElementIsNone())?; - let right = chunk.get(1).copied().unwrap_or(left); - let result = keccak([left.as_bytes(), right.as_bytes()].concat()) - .as_fixed_bytes() - .into(); - if left == current_target { - proof.push(right); - target_hash = result; - } else if right == current_target { - proof.push(left); - target_hash = result; - } - Ok(result) - }) - .collect(); - } - - Ok(Some(proof)) -} diff --git a/crates/l2/sdk/src/sdk.rs b/crates/l2/sdk/src/sdk.rs index 5a8db8c9fe..a450e9a679 100644 --- a/crates/l2/sdk/src/sdk.rs +++ b/crates/l2/sdk/src/sdk.rs @@ -19,7 +19,6 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer}; pub mod calldata; pub mod l1_to_l2_tx_data; -pub mod merkle_tree; pub use l1_to_l2_tx_data::{L1ToL2TransactionData, send_l1_to_l2_tx}; use tracing::{info, trace}; @@ -181,7 +180,7 @@ pub async fn claim_withdraw( )), Value::Uint(amount), Value::Uint(message_proof.batch_number.into()), - Value::Uint(U256::from(message_proof.index)), + Value::Uint(message_proof.message_id), Value::Array( message_proof .merkle_proof @@ -236,7 +235,7 @@ pub async fn claim_erc20withdraw( Value::Address(token_l2), Value::Uint(amount), Value::Uint(U256::from(message_proof.batch_number)), - Value::Uint(U256::from(message_proof.index)), + Value::Uint(message_proof.message_id), Value::Array( message_proof .merkle_proof diff --git a/crates/l2/sequencer/errors.rs b/crates/l2/sequencer/errors.rs index 0a44475275..7afa0d96a3 100644 --- a/crates/l2/sequencer/errors.rs +++ b/crates/l2/sequencer/errors.rs @@ -5,10 +5,8 @@ use ethereum_types::FromStrRadixErr; use ethrex_blockchain::error::{ChainError, InvalidForkChoice}; use ethrex_common::types::{BlobsBundleError, FakeExponentialError}; use ethrex_l2_common::deposits::DepositError; -use ethrex_l2_common::l1_messages::L1MessagingError; use ethrex_l2_common::prover::ProverType; use ethrex_l2_common::state_diff::StateDiffError; -use ethrex_l2_sdk::merkle_tree::MerkleError; use ethrex_rpc::clients::EngineClientError; use ethrex_rpc::clients::eth::errors::{CalldataEncodeError, EthClientError}; use ethrex_storage::error::StoreError; @@ -229,8 +227,6 @@ pub enum CommitterError { FailedToSendCommitment(String), #[error("Committer failed to decode deposit hash")] FailedToDecodeDepositHash, - #[error("Committer failed to merkelize: {0}")] - FailedToMerkelize(#[from] MerkleError), #[error("Withdrawal transaction was invalid")] InvalidWithdrawalTransaction, #[error("Blob estimation failed: {0}")] @@ -245,8 +241,6 @@ pub enum CommitterError { FailedToGetWithdrawals(#[from] UtilsError), #[error("Deposit error: {0}")] DepositError(#[from] DepositError), - #[error("L1Message 
error: {0}")] - L1MessageError(#[from] L1MessagingError), // TODO: Avoid propagating GenServerErrors outside GenServer modules // See https://github.com/lambdaclass/ethrex/issues/3376 #[error("Spawned GenServer Error")] diff --git a/crates/l2/sequencer/l1_committer.rs b/crates/l2/sequencer/l1_committer.rs index a9bff70b4d..4d538f9921 100644 --- a/crates/l2/sequencer/l1_committer.rs +++ b/crates/l2/sequencer/l1_committer.rs @@ -16,7 +16,8 @@ use ethrex_common::{ use ethrex_l2_common::{ calldata::Value, deposits::{compute_deposit_logs_hash, get_block_deposits}, - l1_messages::{compute_merkle_root, get_block_l1_messages, get_l1_message_hash}, + l1_messages::{get_block_l1_messages, get_l1_message_hash}, + merkle_tree::compute_merkle_root, state_diff::{StateDiff, prepare_state_diff}, }; use ethrex_l2_sdk::calldata::encode_calldata; @@ -465,7 +466,7 @@ async fn send_commitment( state: &mut CommitterState, batch: &Batch, ) -> Result { - let messages_merkle_root = compute_merkle_root(&batch.message_hashes)?; + let messages_merkle_root = compute_merkle_root(&batch.message_hashes); let last_block_hash = get_last_block_hash(&state.store, batch.last_block)?; let mut calldata_values = vec![ diff --git a/crates/l2/tee/quote-gen/Cargo.lock b/crates/l2/tee/quote-gen/Cargo.lock index 5532944d1c..c8d9558ec0 100644 --- a/crates/l2/tee/quote-gen/Cargo.lock +++ b/crates/l2/tee/quote-gen/Cargo.lock @@ -68,7 +68,7 @@ dependencies = [ "ethers", "futures-util", "hex", - "lambdaworks-crypto", + "lambdaworks-crypto 0.12.0", "log", "reqwest 0.12.20", "serde", @@ -2295,8 +2295,10 @@ dependencies = [ "ethrex-trie", "ethrex-vm", "keccak-hash", + "lambdaworks-crypto 0.11.0", "lazy_static", "serde", + "sha3", "thiserror 2.0.12", ] @@ -3676,6 +3678,18 @@ dependencies = [ "regex-automata 0.4.9", ] +[[package]] +name = "lambdaworks-crypto" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ec4b462bbec171e1af821f3d9fff72e17de93b3d1022f29aa70fec8262c1cee" +dependencies = [ + "lambdaworks-math 0.11.0", + "serde", + "sha2", + "sha3", +] + [[package]] name = "lambdaworks-crypto" version = "0.12.0" diff --git a/crates/networking/rpc/clients/eth/mod.rs b/crates/networking/rpc/clients/eth/mod.rs index ac8c38d39a..ea600637d8 100644 --- a/crates/networking/rpc/clients/eth/mod.rs +++ b/crates/networking/rpc/clients/eth/mod.rs @@ -97,7 +97,7 @@ pub const ERROR_FUNCTION_SELECTOR: [u8; 4] = [0x08, 0xc3, 0x79, 0xa0]; #[derive(Serialize, Deserialize, Debug)] pub struct L1MessageProof { pub batch_number: u64, - pub index: usize, + pub message_id: U256, pub message_hash: H256, pub merkle_proof: Vec, } diff --git a/fixtures/genesis/l2.json b/fixtures/genesis/l2.json index 74b9029fa4..2ad6ab8ddc 100644 --- a/fixtures/genesis/l2.json +++ b/fixtures/genesis/l2.json @@ -41,16 +41,16 @@ "coinbase": "0x0000000000000000000000000000000000000000", "alloc": { "0x000000000000000000000000000000000000fffe": { - "code": 
"0x60806040526004361015610013575b6100bb565b61001d5f3561002b565b62cffbe50361000e57610088565b60e01c90565b60405190565b5f80fd5b5f80fd5b90565b61004b8161003f565b0361005257565b5f80fd5b9050359061006382610042565b565b9060208282031261007e5761007b915f01610056565b90565b61003b565b5f0190565b346100b6576100a061009b366004610065565b61010d565b6100a8610031565b806100b281610083565b0390f35b610037565b5f80fd5b60018060a01b031690565b90565b6100e16100dc6100e6926100bf565b6100ca565b6100bf565b90565b6100f2906100cd565b90565b6100fe906100e9565b90565b61010a9061003f565b90565b339061014261013c7fe2eb319166f66bdc0da4ccabd81814f7d4e5689f7860243bbfeb3a7d461421f5936100f5565b91610101565b9161014b610031565b8061015581610083565b0390a356fea2646970667358221220f78418bd909dc7b09815aacd7f56e47c5a54cbecef0d36d9bc84e188c027794f64736f6c634300081d0033", + "code": "0x608060405234801561000f575f5ffd5b5060043610610033575f3560e01c8062cffbe51461003757806360206aab14610053575b5f5ffd5b610051600480360381019061004c919061010e565b610071565b005b61005b6100d2565b6040516100689190610151565b60405180910390f35b60015f5f8282546100829190610197565b925050819055505f54813373ffffffffffffffffffffffffffffffffffffffff167f18d7b705344d616d1b61daa6a8ccfcf9f10c27ade007cc45cf870d1e121f1a9d60405160405180910390a450565b5f5481565b5f5ffd5b5f819050919050565b6100ed816100db565b81146100f7575f5ffd5b50565b5f81359050610108816100e4565b92915050565b5f60208284031215610123576101226100d7565b5b5f610130848285016100fa565b91505092915050565b5f819050919050565b61014b81610139565b82525050565b5f6020820190506101645f830184610142565b92915050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b5f6101a182610139565b91506101ac83610139565b92508282019050808211156101c4576101c361016a565b5b9291505056fea26469706673582212203b15dc17337b8c5248e1ef278d676433350c062e7160f6221b42bfbeb3682d7364736f6c634300081d0033", "storage": {}, "balance": "0x0", "nonce": "0x1" }, "0x000000000000000000000000000000000000ffff": { - "code": 
"0x60806040526004361015610013575b610383565b61001d5f3561009c565b806351cff8d91461009757806358bc83371461009257806379204fe01461008d57806379c0cdef146100885780637e1233a914610083578063b0f4d3951461007e578063d23061db146100795763fccc28130361000e5761034e565b610308565b6102df565b6102aa565b610262565b61022b565b61018a565b610109565b60e01c90565b60405190565b5f80fd5b60018060a01b031690565b6100c0906100ac565b90565b6100cc816100b7565b036100d357565b5f80fd5b905035906100e4826100c3565b565b906020828203126100ff576100fc915f016100d7565b90565b6100a8565b5f0190565b61011c6101173660046100e6565b610710565b6101246100a2565b8061012e81610104565b0390f35b5f80fd5b5f91031261014057565b6100a8565b73eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee90565b610165610145565b90565b610171906100b7565b9052565b9190610188905f60208501940190610168565b565b346101ba5761019a366004610136565b6101b66101a561015d565b6101ad6100a2565b91829182610175565b0390f35b610132565b90565b6101cb816101bf565b036101d257565b5f80fd5b905035906101e3826101c2565b565b608081830312610226576101fb825f83016100d7565b9261022361020c84602085016100d7565b9361021a81604086016100d7565b936060016101d6565b90565b6100a8565b3461025d5761024761023e3660046101e5565b92919091610af7565b61024f6100a2565b8061025981610104565b0390f35b610132565b346102945761027e6102753660046101e5565b92919091610ce9565b6102866100a2565b8061029081610104565b0390f35b610132565b61fffe90565b6102a7610299565b90565b346102da576102ba366004610136565b6102d66102c561029f565b6102cd6100a2565b91829182610175565b0390f35b610132565b6102f26102ed3660046100e6565b610cf7565b6102fa6100a2565b8061030481610104565b0390f35b3461033a5761032461031b3660046101e5565b92919091610dfe565b61032c6100a2565b8061033681610104565b0390f35b610132565b5f90565b61034b61033f565b90565b3461037e5761035e366004610136565b61037a610369610343565b6103716100a2565b91829182610175565b0390f35b610132565b5f80fd5b90565b90565b6103a161039c6103a692610387565b61038a565b6101bf565b90565b60209181520190565b60207f7665000000000000000000000000000000000000000000000000000000000000917f5769746864726177616c20616d6f756e74206d75737420626520706f736974695f8201520152565b61040c60226040926103a9565b610415816103b2565b0190565b61042e9060208101905f8183039101526103ff565b90565b1561043857565b6104406100a2565b62461bcd60e51b81528061045660048201610419565b0390fd5b905090565b61046a5f809261045a565b0190565b6104779061045f565b90565b601f801991011690565b634e487b7160e01b5f52604160045260245ffd5b906104a29061047a565b810190811067ffffffffffffffff8211176104bc57604052565b610484565b906104d46104cd6100a2565b9283610498565b565b67ffffffffffffffff81116104f4576104f060209161047a565b0190565b610484565b9061050b610506836104d6565b6104c1565b918252565b606090565b3d5f14610530576105253d6104f9565b903d5f602084013e5b565b610538610510565b9061052e565b5f7f4661696c656420746f206275726e204574686572000000000000000000000000910152565b61057260146020926103a9565b61057b8161053e565b0190565b6105949060208101905f818303910152610565565b90565b1561059e57565b6105a66100a2565b62461bcd60e51b8152806105bc6004820161057f565b0390fd5b6105d46105cf6105d9926100ac565b61038a565b6100ac565b90565b6105e5906105c0565b90565b6105f1906105dc565b90565b61060861060361060d926101bf565b61038a565b6101bf565b90565b610619906105c0565b90565b61062590610610565b90565b610631906105dc565b90565b60601b90565b61064390610634565b90565b61064f9061063a565b90565b61065e610663916100b7565b610646565b9052565b90565b61067661067b916101bf565b610667565b9052565b926106ac60146106b4946106a4828861069c60209b9a8399610652565b018092610652565b018092610652565b01809261066a565b0190565b60200190565b5190565b5f80fd5b60e01b90565b5f9103126106d657565b6100a8565b90565b6106e7906106db565b9052565b91906106
fe905f602085019401906106de565b565b6107086100a2565b3d5f823e3d90fd5b61072c346107266107205f61038d565b916101bf565b11610431565b61075f5f8061073961033f565b346107426100a2565b908161074d8161046e565b03925af1610759610515565b50610597565b3381349061079f6107996107937fbb2689ff876f7ef453cf8865dde5ab10349d222e2e1383c5152fbdb083f02da2956105e8565b926105e8565b926105f4565b926107a86100a2565b806107b281610104565b0390a46107cd6107c86107c3610299565b61061c565b610628565b9062cffbe59061080f6107de610145565b6108006107e9610145565b9334906107f46100a2565b9586946020860161067f565b60208201810382520382610498565b61082161081b826106be565b916106b8565b20823b1561089557610852926108475f809461083b6100a2565b968795869485936106c6565b8352600483016106eb565b03925af1801561089057610864575b50565b610883905f3d8111610889575b61087b8183610498565b8101906106cc565b5f610861565b503d610871565b610700565b6106c2565b6108a3906105dc565b90565b60207f6520627269646765000000000000000000000000000000000000000000000000917f436f6d6d6f6e4272696467654c323a2063616c6c6572206973206e6f742074685f8201520152565b61090060286040926103a9565b610909816108a6565b0190565b6109229060208101905f8183039101526108f3565b90565b1561092c57565b6109346100a2565b62461bcd60e51b81528061094a6004820161090d565b0390fd5b9061097e9392916109793361097361096d6109683061089a565b6100b7565b916100b7565b14610925565b610a16565b565b63ffffffff1690565b63ffffffff60e01b1690565b6109a96109a46109ae92610980565b6106c6565b610989565b90565b6109ba906101bf565b9052565b6109f36109fa946109e96060949897956109df608086019a5f870190610168565b6020850190610168565b6040830190610168565b01906109b1565b565b151590565b9190610a14905f602085019401906109b1565b565b92919092610a855f80610a283061089a565b6004610a6c6379c0cdef610a5d88918b8d610a448d9293610995565b94610a4d6100a2565b97889660208801908152016109be565b60208201810382520382610498565b82602082019151925af1610a7e610515565b50156109fc565b610ae4575b92909192610adf610acd610ac7610ac17ff5353a2477e10b23280de25ca6cea55c17bb48000d8807ee631e514080e7fb4e946105e8565b946105e8565b946105e8565b94610ad66100a2565b91829182610a01565b0390a4565b610af2818584908692610f16565b610a8a565b90610b0393929161094e565b565b90610b35939291610b3033610b2a610b24610b1f3061089a565b6100b7565b916100b7565b14610925565b610bb6565b565b610b40906105c0565b90565b610b4c90610b37565b90565b610b58906105dc565b90565b90505190610b68826100c3565b565b90602082820312610b8357610b80915f01610b5b565b90565b6100a8565b15610b8f57565b5f80fd5b916020610bb4929493610bad60408201965f830190610168565b01906109b1565b565b90610bc090610b43565b610be46020610bce83610b4f565b63c2eeeebd90610bdc6100a2565b9384926106c6565b8252815f81610bf560048201610104565b03925af18015610ce457610c2c93610c27925f92610cac575b50610c1b610c21916100b7565b916100b7565b14610b88565b610b4f565b916318bf5077919092803b15610ca757610c595f8094610c64610c4d6100a2565b978896879586946106c6565b845260048401610b93565b03925af18015610ca257610c76575b50565b610c95905f3d8111610c9b575b610c8d8183610498565b8101906106cc565b5f610c73565b503d610c83565b610700565b6106c2565b610c21919250610cd5610c1b9160203d8111610cdd575b610ccd8183610498565b810190610b6a565b929150610c0e565b503d610cc3565b610700565b90610cf5939291610b05565b565b610d245f808334610d066100a2565b9081610d118161046e565b03925af1610d1d610515565b50156109fc565b610d6f575b3490610d6a610d587f85a190caa61692b36b63a55e069330d18ab9af179fed7a25c16a4262bc63b7d2926105e8565b92610d616100a2565b91829182610a01565b0390a2565b610d783061089a565b6351cff8d934919091908390803b15610df957610da85f93610db395610d9c6100a2565b968795869485936106c6565b835260048301610175565b03925af18015610df457610dc8575b50610d29565b610de7905f3d8111610ded575b610ddf8183610498565b810
1906106cc565b5f610dc2565b503d610dd5565b610700565b6106c2565b929091610e1d82610e17610e115f61038d565b916101bf565b11610431565b610e2e610e2984610b43565b610b4f565b93632b8c49e3338496803b15610f1157610e5b5f8094610e66610e4f6100a2565b9b8c96879586946106c6565b845260048401610b93565b03925af1948515610f0c57610ede95610ee0575b50808483908592610ed2610ec0610eba610eb47f54538b93c6e9b3f518076db2d896122f653fac2bb32fa0b6bc75097b9f332e75946105e8565b946105e8565b946105e8565b94610ec96100a2565b91829182610a01565b0390a492909192610f16565b565b610eff905f3d8111610f05575b610ef78183610498565b8101906106cc565b5f610e7a565b503d610eed565b610700565b6106c2565b9190610f6290610f53610f37610f32610f2d610299565b61061c565b610628565b9562cffbe5959294610f476100a2565b9586946020860161067f565b60208201810382520382610498565b610f74610f6e826106be565b916106b8565b20823b15610fe857610fa592610f9a5f8094610f8e6100a2565b968795869485936106c6565b8352600483016106eb565b03925af18015610fe357610fb7575b50565b610fd6905f3d8111610fdc575b610fce8183610498565b8101906106cc565b5f610fb4565b503d610fc4565b610700565b6106c256fea264697066735822122021094d46fd2b6014fcc19d0fe48d1c311b59052f06dc38da5cc017ceb3e86f0964736f6c634300081d0033", + "code": "0x60806040526004361061007a575f3560e01c80637e1233a91161004d5780637e1233a914610114578063b0f4d3951461013e578063d23061db1461015a578063fccc2813146101825761007a565b806351cff8d91461007e57806358bc83371461009a57806379204fe0146100c457806379c0cdef146100ec575b5f5ffd5b61009860048036038101906100939190610a79565b6101ac565b005b3480156100a5575f5ffd5b506100ae61039c565b6040516100bb9190610ab3565b60405180910390f35b3480156100cf575f5ffd5b506100ea60048036038101906100e59190610aff565b6103b4565b005b3480156100f7575f5ffd5b50610112600480360381019061010d9190610aff565b610593565b005b34801561011f575f5ffd5b50610128610718565b6040516101359190610ab3565b60405180910390f35b61015860048036038101906101539190610a79565b61071e565b005b348015610165575f5ffd5b50610180600480360381019061017b9190610aff565b610846565b005b34801561018d575f5ffd5b5061019661097e565b6040516101a39190610ab3565b60405180910390f35b5f34116101ee576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016101e590610be3565b60405180910390fd5b5f5f73ffffffffffffffffffffffffffffffffffffffff163460405161021390610c2e565b5f6040518083038185875af1925050503d805f811461024d576040519150601f19603f3d011682016040523d82523d5f602084013e610252565b606091505b5050905080610296576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161028d90610c8c565b60405180910390fd5b348273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167fbb2689ff876f7ef453cf8865dde5ab10349d222e2e1383c5152fbdb083f02da260405160405180910390a461fffe73ffffffffffffffffffffffffffffffffffffffff1662cffbe573eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee8085346040516020016103399493929190610d0f565b604051602081830303815290604052805190602001206040518263ffffffff1660e01b815260040161036b9190610d74565b5f604051808303815f87803b158015610382575f5ffd5b505af1158015610394573d5f5f3e3d5ffd5b505050505050565b73eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee81565b3073ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614610422576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161041990610dfd565b60405180910390fd5b5f3073ffffffffffffffffffffffffffffffffffffffff163073ffffffffffffffffffffffffffffffffffffffff166379c0cdef8787878760405160240161046d9493929190610e2a565b604051602081830303815290604052915060e01b6020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183
16178352505050506040516104bc9190610eb5565b5f604051808303815f865af19150503d805f81146104f5576040519150601f19603f3d011682016040523d82523d5f602084013e6104fa565b606091505b50509050806105105761050f85858585610982565b5b8273ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff168673ffffffffffffffffffffffffffffffffffffffff167ff5353a2477e10b23280de25ca6cea55c17bb48000d8807ee631e514080e7fb4e856040516105849190610ecb565b60405180910390a45050505050565b3073ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614610601576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016105f890610dfd565b60405180910390fd5b5f8390508473ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1663c2eeeebd6040518163ffffffff1660e01b81526004016020604051808303815f875af1158015610666573d5f5f3e3d5ffd5b505050506040513d601f19601f8201168201806040525081019061068a9190610ef8565b73ffffffffffffffffffffffffffffffffffffffff16146106a9575f5ffd5b8073ffffffffffffffffffffffffffffffffffffffff166318bf507784846040518363ffffffff1660e01b81526004016106e4929190610f23565b5f604051808303815f87803b1580156106fb575f5ffd5b505af115801561070d573d5f5f3e3d5ffd5b505050505050505050565b61fffe81565b5f8173ffffffffffffffffffffffffffffffffffffffff163460405161074390610c2e565b5f6040518083038185875af1925050503d805f811461077d576040519150601f19603f3d011682016040523d82523d5f602084013e610782565b606091505b50509050806107f4573073ffffffffffffffffffffffffffffffffffffffff166351cff8d934846040518363ffffffff1660e01b81526004016107c59190610ab3565b5f604051808303818588803b1580156107dc575f5ffd5b505af11580156107ee573d5f5f3e3d5ffd5b50505050505b8173ffffffffffffffffffffffffffffffffffffffff167f85a190caa61692b36b63a55e069330d18ab9af179fed7a25c16a4262bc63b7d23460405161083a9190610ecb565b60405180910390a25050565b5f8111610888576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161087f90610be3565b60405180910390fd5b8273ffffffffffffffffffffffffffffffffffffffff16632b8c49e333836040518363ffffffff1660e01b81526004016108c3929190610f23565b5f604051808303815f87803b1580156108da575f5ffd5b505af11580156108ec573d5f5f3e3d5ffd5b505050508173ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff168573ffffffffffffffffffffffffffffffffffffffff167f54538b93c6e9b3f518076db2d896122f653fac2bb32fa0b6bc75097b9f332e75846040516109649190610ecb565b60405180910390a461097884848484610982565b50505050565b5f81565b61fffe73ffffffffffffffffffffffffffffffffffffffff1662cffbe5858585856040516020016109b69493929190610d0f565b604051602081830303815290604052805190602001206040518263ffffffff1660e01b81526004016109e89190610d74565b5f604051808303815f87803b1580156109ff575f5ffd5b505af1158015610a11573d5f5f3e3d5ffd5b5050505050505050565b5f5ffd5b5f73ffffffffffffffffffffffffffffffffffffffff82169050919050565b5f610a4882610a1f565b9050919050565b610a5881610a3e565b8114610a62575f5ffd5b50565b5f81359050610a7381610a4f565b92915050565b5f60208284031215610a8e57610a8d610a1b565b5b5f610a9b84828501610a65565b91505092915050565b610aad81610a3e565b82525050565b5f602082019050610ac65f830184610aa4565b92915050565b5f819050919050565b610ade81610acc565b8114610ae8575f5ffd5b50565b5f81359050610af981610ad5565b92915050565b5f5f5f5f60808587031215610b1757610b16610a1b565b5b5f610b2487828801610a65565b9450506020610b3587828801610a65565b9350506040610b4687828801610a65565b9250506060610b5787828801610aeb565b91505092959194509250565b5f82825260208201905092915050565b7f5769746864726177616c20616d6f756e74206d75737420626520706f736974695f8201527
f7665000000000000000000000000000000000000000000000000000000000000602082015250565b5f610bcd602283610b63565b9150610bd882610b73565b604082019050919050565b5f6020820190508181035f830152610bfa81610bc1565b9050919050565b5f81905092915050565b50565b5f610c195f83610c01565b9150610c2482610c0b565b5f82019050919050565b5f610c3882610c0e565b9150819050919050565b7f4661696c656420746f206275726e2045746865720000000000000000000000005f82015250565b5f610c76601483610b63565b9150610c8182610c42565b602082019050919050565b5f6020820190508181035f830152610ca381610c6a565b9050919050565b5f8160601b9050919050565b5f610cc082610caa565b9050919050565b5f610cd182610cb6565b9050919050565b610ce9610ce482610a3e565b610cc7565b82525050565b5f819050919050565b610d09610d0482610acc565b610cef565b82525050565b5f610d1a8287610cd8565b601482019150610d2a8286610cd8565b601482019150610d3a8285610cd8565b601482019150610d4a8284610cf8565b60208201915081905095945050505050565b5f819050919050565b610d6e81610d5c565b82525050565b5f602082019050610d875f830184610d65565b92915050565b7f436f6d6d6f6e4272696467654c323a2063616c6c6572206973206e6f742074685f8201527f6520627269646765000000000000000000000000000000000000000000000000602082015250565b5f610de7602883610b63565b9150610df282610d8d565b604082019050919050565b5f6020820190508181035f830152610e1481610ddb565b9050919050565b610e2481610acc565b82525050565b5f608082019050610e3d5f830187610aa4565b610e4a6020830186610aa4565b610e576040830185610aa4565b610e646060830184610e1b565b95945050505050565b5f81519050919050565b8281835e5f83830152505050565b5f610e8f82610e6d565b610e998185610c01565b9350610ea9818560208601610e77565b80840191505092915050565b5f610ec08284610e85565b915081905092915050565b5f602082019050610ede5f830184610e1b565b92915050565b5f81519050610ef281610a4f565b92915050565b5f60208284031215610f0d57610f0c610a1b565b5b5f610f1a84828501610ee4565b91505092915050565b5f604082019050610f365f830185610aa4565b610f436020830184610e1b565b939250505056fea264697066735822122086a87c6bfbc7e28342dae591224273aac40995ea53bf466d5b4a9bb1fa912cdf64736f6c634300081d0033", "storage": {}, "balance": "0x0", "nonce": "0x1" } } -} \ No newline at end of file +} From aac658c5380180bad56e9cea6515c695a6fb7dd4 Mon Sep 17 00:00:00 2001 From: Lucas Fiegl Date: Tue, 1 Jul 2025 12:24:31 -0300 Subject: [PATCH 35/40] feat(l2): burn gas when sending privileged transactions (#3407) **Motivation** To prevent users from sending L2 transactions for free, we must charge them for the gas sent. **Description** One way to do this is to burn the gas limit specified at L1 prices. 
Closes #3402, closes #2156 --- crates/l2/contracts/src/l1/CommonBridge.sol | 8 ++++++ crates/l2/tests/tests.rs | 31 ++++++++++++++++++++- 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/crates/l2/contracts/src/l1/CommonBridge.sol b/crates/l2/contracts/src/l1/CommonBridge.sol index 97fea19d94..41874266d7 100644 --- a/crates/l2/contracts/src/l1/CommonBridge.sol +++ b/crates/l2/contracts/src/l1/CommonBridge.sol @@ -107,8 +107,16 @@ contract CommonBridge is function getPendingDepositLogs() public view returns (bytes32[] memory) { return pendingDepositLogs; } + + /// Burns at least {amount} gas + function _burnGas(uint256 amount) private view { + uint256 startingGas = gasleft(); + while (startingGas - gasleft() < amount) {} + } function _sendToL2(address from, SendValues memory sendValues) private { + _burnGas(sendValues.gasLimit); + bytes32 l2MintTxHash = keccak256( bytes.concat( bytes20(sendValues.to), diff --git a/crates/l2/tests/tests.rs b/crates/l2/tests/tests.rs index 25ec1d0610..cd123ff3a9 100644 --- a/crates/l2/tests/tests.rs +++ b/crates/l2/tests/tests.rs @@ -108,6 +108,8 @@ async fn l2_integration_test() -> Result<(), Box> { ) .await?; + test_gas_burning(ð_client).await?; + test_deposit_with_contract_call(&proposer_client, ð_client).await?; test_deposit_with_contract_call_revert(&proposer_client, ð_client).await?; @@ -612,6 +614,33 @@ async fn test_transfer_with_deposit( Ok(()) } +async fn test_gas_burning(eth_client: &EthClient) -> Result<(), Box> { + println!("Transferring funds on L2 through a deposit"); + let rich_private_key = l1_rich_wallet_private_key(); + let rich_address = get_address_from_secret_key(&rich_private_key)?; + let l2_gas_limit = 2_000_000; + let l1_extra_gas_limit = 400_000; + + let l1_to_l2_tx_hash = ethrex_l2_sdk::send_l1_to_l2_tx( + rich_address, + Some(0), + Some(l2_gas_limit + l1_extra_gas_limit), + L1ToL2TransactionData::new(rich_address, l2_gas_limit, U256::zero(), Bytes::new()), + &rich_private_key, + common_bridge_address(), + eth_client, + ) + .await?; + + println!("Waiting for L1 to L2 transaction receipt on L1"); + + let l1_to_l2_tx_receipt = wait_for_transaction_receipt(l1_to_l2_tx_hash, eth_client, 5).await?; + + assert!(l1_to_l2_tx_receipt.tx_info.gas_used > l2_gas_limit); + assert!(l1_to_l2_tx_receipt.tx_info.gas_used < l2_gas_limit + l1_extra_gas_limit); + Ok(()) +} + async fn test_deposit_not_enough_balance( receiver_private_key: &SecretKey, eth_client: &EthClient, @@ -1090,7 +1119,7 @@ async fn test_call_to_contract_with_deposit( let l1_to_l2_tx_hash = ethrex_l2_sdk::send_l1_to_l2_tx( caller_address, Some(0), - Some(21000 * 5), + Some(21000 * 10), L1ToL2TransactionData::new( deployed_contract_address, 21000 * 5, From 48e8bbb62bbef641df64d7d7949014b8dcc044a8 Mon Sep 17 00:00:00 2001 From: Esteban Dimitroff Hodi Date: Tue, 1 Jul 2025 18:19:49 -0300 Subject: [PATCH 36/40] Fixed quote-gen Cargo.lock and reseted spawned sha --- crates/l2/tee/quote-gen/Cargo.lock | 74 +++++++++++++---------------- crates/l2/tee/quote-gen/service.nix | 2 +- 2 files changed, 34 insertions(+), 42 deletions(-) diff --git a/crates/l2/tee/quote-gen/Cargo.lock b/crates/l2/tee/quote-gen/Cargo.lock index 122ce9ca9f..948c6af6d5 100644 --- a/crates/l2/tee/quote-gen/Cargo.lock +++ b/crates/l2/tee/quote-gen/Cargo.lock @@ -70,7 +70,7 @@ dependencies = [ "hex", "lambdaworks-crypto 0.12.0", "log", - "reqwest 0.12.20", + "reqwest 0.12.22", "serde", "serde_json", "serde_repr", @@ -1538,7 +1538,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.101", + 
"syn 2.0.104", ] [[package]] @@ -1548,7 +1548,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" dependencies = [ "derive_builder_core", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -2229,7 +2229,7 @@ dependencies = [ "hex", "jsonwebtoken 9.3.1", "keccak-hash", - "reqwest 0.12.20", + "reqwest 0.12.22", "serde", "serde_json", "sha2", @@ -2268,7 +2268,7 @@ dependencies = [ "keccak-hash", "lazy_static", "rand 0.8.5", - "reqwest 0.12.20", + "reqwest 0.12.22", "secp256k1", "serde", "serde_json", @@ -2406,7 +2406,7 @@ dependencies = [ "k256", "keccak-hash", "rand 0.8.5", - "reqwest 0.12.20", + "reqwest 0.12.22", "secp256k1", "serde", "serde_json", @@ -2435,7 +2435,7 @@ dependencies = [ "itertools 0.13.0", "keccak-hash", "lazy_static", - "reqwest 0.12.20", + "reqwest 0.12.22", "secp256k1", "serde", "serde_json", @@ -2912,9 +2912,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9421a676d1b147b16b82c9225157dc629087ef8ec4d5e2960f9437a90dac0a5" +checksum = "17da50a276f1e01e0ba6c029e47b7100754904ee8a278f886546e98575380785" dependencies = [ "atomic-waker", "bytes", @@ -3137,7 +3137,7 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "h2 0.4.10", + "h2 0.4.11", "http 1.3.1", "http-body 1.0.1", "httparse", @@ -3752,18 +3752,6 @@ dependencies = [ "pkg-config", ] -[[package]] -name = "libgit2-sys" -version = "0.18.2+1.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c42fe03df2bd3c53a3a9c7317ad91d80c81cd1fb0caec8d7cc4cd2bfa10c222" -dependencies = [ - "cc", - "libc", - "libz-sys", - "pkg-config", -] - [[package]] name = "libm" version = "0.2.15" @@ -4080,15 +4068,6 @@ dependencies = [ "libc", ] -[[package]] -name = "num_threads" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9" -dependencies = [ - "libc", -] - [[package]] name = "nybbles" version = "0.3.4" @@ -4991,15 +4970,15 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.20" +version = "0.12.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabf4c97d9130e2bf606614eb937e86edac8292eaa6f422f995d7e8de1eb1813" +checksum = "cbc931937e6ca3a06e3b6c0aa7841849b160a90351d6ab467a8b9b9959767531" dependencies = [ "base64 0.22.1", "bytes", "encoding_rs", "futures-core", - "h2 0.4.10", + "h2 0.4.11", "http 1.3.1", "http-body 1.0.1", "http-body-util", @@ -5423,6 +5402,18 @@ dependencies = [ "serde_json", ] +[[package]] +name = "schemars" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1375ba8ef45a6f15d83fa8748f1079428295d403d6ea991d09ab100155fbc06d" +dependencies = [ + "dyn-clone", + "ref-cast", + "serde", + "serde_json", +] + [[package]] name = "scopeguard" version = "1.2.0" @@ -5632,16 +5623,17 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf65a400f8f66fb7b0552869ad70157166676db75ed8181f8104ea91cf9d0b42" +checksum = "f2c45cd61fefa9db6f254525d46e392b852e0e61d9a1fd36e5bd183450a556d5" dependencies = [ "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", "indexmap 2.10.0", - "schemars", + "schemars 0.9.0", + "schemars 1.0.3", "serde", "serde_derive", "serde_json", @@ -5651,9 
+5643,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81679d9ed988d5e9a5e6531dc3f2c28efbd639cbd1dfb628df08edea6004da77" +checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f" dependencies = [ "darling", "proc-macro2", @@ -5855,7 +5847,7 @@ dependencies = [ [[package]] name = "spawned-concurrency" version = "0.1.0" -source = "git+https://github.com/lambdaclass/spawned.git?tag=v0.1.1-alpha#481439d3d147fa741d67bb5ff047f8c68bcefb83" +source = "git+https://github.com/lambdaclass/spawned.git?tag=v0.1.2-alpha#c6f757c0cc07a34f9e56c8c7ea8fde483b50ea20" dependencies = [ "futures", "spawned-rt", @@ -5865,7 +5857,7 @@ dependencies = [ [[package]] name = "spawned-rt" version = "0.1.0" -source = "git+https://github.com/lambdaclass/spawned.git?tag=v0.1.1-alpha#481439d3d147fa741d67bb5ff047f8c68bcefb83" +source = "git+https://github.com/lambdaclass/spawned.git?tag=v0.1.2-alpha#c6f757c0cc07a34f9e56c8c7ea8fde483b50ea20" dependencies = [ "crossbeam", "tokio", diff --git a/crates/l2/tee/quote-gen/service.nix b/crates/l2/tee/quote-gen/service.nix index 1df2c1d64a..a0e7f97612 100644 --- a/crates/l2/tee/quote-gen/service.nix +++ b/crates/l2/tee/quote-gen/service.nix @@ -20,7 +20,7 @@ let lockFile = ./Cargo.lock; outputHashes = { "bls12_381-0.8.0" = "sha256-8/pXRA7hVAPeMKCZ+PRPfQfxqstw5Ob4MJNp85pv5WQ="; - "spawned-concurrency-0.1.0" = "sha256-ZnQ6eBFG/r1chTbaoh117J7QmtogwVdu6q/j7JLvO/o="; + "spawned-concurrency-0.1.0" = ""; "aligned-sdk-0.1.0" = "sha256-Az97VtggdN4gsYds3myezNJ+mNeSaIDbF0Pq5kq2M3M="; "lambdaworks-crypto-0.12.0" = "sha256-4vgW/O85zVLhhFrcZUwcPjavy/rRWB8LGTabAkPNrDw="; }; From ef9864d85ef237e700fa1a1443a7a6f20b319517 Mon Sep 17 00:00:00 2001 From: Esteban Dimitroff Hodi Date: Tue, 1 Jul 2025 18:32:53 -0300 Subject: [PATCH 37/40] Updated spawned sha --- crates/l2/sequencer/proof_coordinator.rs | 4 +++- crates/l2/tee/quote-gen/service.nix | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/crates/l2/sequencer/proof_coordinator.rs b/crates/l2/sequencer/proof_coordinator.rs index 79b69b6e07..af69ec5bda 100644 --- a/crates/l2/sequencer/proof_coordinator.rs +++ b/crates/l2/sequencer/proof_coordinator.rs @@ -396,7 +396,9 @@ async fn handle_connection( batch_number, batch_proof, }) => { - if let Err(e) = handle_submit(state, &mut stream, batch_number, batch_proof).await { + if let Err(e) = + handle_submit(state, &mut stream, batch_number, batch_proof).await + { error!("Failed to handle ProofSubmit: {e}"); } } diff --git a/crates/l2/tee/quote-gen/service.nix b/crates/l2/tee/quote-gen/service.nix index a0e7f97612..1d1479539c 100644 --- a/crates/l2/tee/quote-gen/service.nix +++ b/crates/l2/tee/quote-gen/service.nix @@ -20,7 +20,7 @@ let lockFile = ./Cargo.lock; outputHashes = { "bls12_381-0.8.0" = "sha256-8/pXRA7hVAPeMKCZ+PRPfQfxqstw5Ob4MJNp85pv5WQ="; - "spawned-concurrency-0.1.0" = ""; + "spawned-concurrency-0.1.0" = "sha256-63xBuGAlrHvIf8hboScUY4LZronPZJZzmfJBdAbUKTU="; "aligned-sdk-0.1.0" = "sha256-Az97VtggdN4gsYds3myezNJ+mNeSaIDbF0Pq5kq2M3M="; "lambdaworks-crypto-0.12.0" = "sha256-4vgW/O85zVLhhFrcZUwcPjavy/rRWB8LGTabAkPNrDw="; }; From 4fca73fa0ef76be8dfbdca584756942aee804b11 Mon Sep 17 00:00:00 2001 From: Esteban Dimitroff Hodi Date: Wed, 2 Jul 2025 17:18:55 -0300 Subject: [PATCH 38/40] Addressed PR review comments --- crates/l2/sequencer/l1_watcher_old.rs | 377 ------------------ crates/l2/sequencer/proof_coordinator.rs | 70 ++-- 
crates/networking/p2p/discv4/server.rs | 4 +- .../p2p/rlpx/connection/handshake.rs | 5 +- .../networking/p2p/rlpx/connection/server.rs | 13 +- crates/networking/p2p/rlpx/p2p.rs | 4 +- 6 files changed, 44 insertions(+), 429 deletions(-) delete mode 100644 crates/l2/sequencer/l1_watcher_old.rs diff --git a/crates/l2/sequencer/l1_watcher_old.rs b/crates/l2/sequencer/l1_watcher_old.rs deleted file mode 100644 index e7b2617d0f..0000000000 --- a/crates/l2/sequencer/l1_watcher_old.rs +++ /dev/null @@ -1,377 +0,0 @@ -use crate::{sequencer::errors::L1WatcherError, utils::parse::hash_to_address}; -use crate::{EthConfig, L1WatcherConfig, SequencerConfig}; -use bytes::Bytes; -use ethereum_types::{Address, H256, U256}; -use ethrex_blockchain::Blockchain; -use ethrex_common::{types::Transaction, H160}; -use ethrex_rpc::types::receipt::RpcLog; -use ethrex_rpc::{ - clients::eth::{eth_sender::Overrides, EthClient}, - types::receipt::RpcLogInfo, -}; -use ethrex_storage::Store; -use keccak_hash::keccak; -use std::{cmp::min, sync::Arc}; -use tracing::{debug, error, info, warn}; - -use super::errors::SequencerError; -use super::utils::sleep_random; - -pub async fn start_l1_watcher( - store: Store, - blockchain: Arc, - cfg: SequencerConfig, -) -> Result<(), SequencerError> { - let mut l1_watcher = L1Watcher::new_from_config(&cfg.l1_watcher, &cfg.eth).await?; - l1_watcher.run(&store, &blockchain).await; - Ok(()) -} - -pub struct L1Watcher { - eth_client: EthClient, - l2_client: EthClient, - address: Address, - max_block_step: U256, - last_block_fetched: U256, - check_interval: u64, - l1_block_delay: u64, -} - -impl L1Watcher { - pub async fn new_from_config( - watcher_config: &L1WatcherConfig, - eth_config: &EthConfig, - ) -> Result { - let eth_client = EthClient::new(ð_config.rpc_url); - let l2_client = EthClient::new("http://localhost:1729"); - - let last_block_fetched = U256::zero(); - Ok(Self { - eth_client, - l2_client, - address: watcher_config.bridge_address, - max_block_step: watcher_config.max_block_step, - last_block_fetched, - check_interval: watcher_config.check_interval_ms, - l1_block_delay: watcher_config.watcher_block_delay, - }) - } - - pub async fn run(&mut self, store: &Store, blockchain: &Blockchain) { - loop { - if let Err(err) = self.main_logic(store, blockchain).await { - error!("L1 Watcher Error: {}", err); - } - } - } - - async fn main_logic( - &mut self, - store: &Store, - blockchain: &Blockchain, - ) -> Result<(), L1WatcherError> { - loop { - sleep_random(self.check_interval).await; - - let logs = self.get_logs().await?; - - // We may not have a deposit nor a withdrawal, that means no events -> no logs. - if logs.is_empty() { - continue; - } - - let _deposit_txs = self.process_logs(logs, store, blockchain).await?; - } - } - - pub async fn get_logs(&mut self) -> Result, L1WatcherError> { - if self.last_block_fetched.is_zero() { - self.last_block_fetched = self - .eth_client - .get_last_fetched_l1_block(self.address) - .await? - .into(); - } - - let Some(latest_block_to_check) = self - .eth_client - .get_block_number() - .await? 
- .checked_sub(self.l1_block_delay.into()) - else { - warn!("Too close to genesis to request deposits"); - return Ok(vec![]); - }; - - debug!( - "Latest possible block number with {} blocks of delay: {latest_block_to_check} ({latest_block_to_check:#x})", - self.l1_block_delay, - ); - - // last_block_fetched could be greater than latest_block_to_check: - // - Right after deploying the contract as latest_block_fetched is set to the block where the contract is deployed - // - If the node is stopped and l1_block_delay is changed - if self.last_block_fetched > latest_block_to_check { - warn!("Last block fetched is greater than latest safe block"); - return Ok(vec![]); - } - - let new_last_block = min( - self.last_block_fetched + self.max_block_step, - latest_block_to_check, - ); - - debug!( - "Looking logs from block {:#x} to {:#x}", - self.last_block_fetched, new_last_block - ); - - // Matches the event DepositInitiated from ICommonBridge.sol - let topic = keccak( - b"DepositInitiated(uint256,address,uint256,address,address,uint256,bytes,bytes32)", - ); - let logs = match self - .eth_client - .get_logs( - self.last_block_fetched + 1, - new_last_block, - self.address, - topic, - ) - .await - { - Ok(logs) => logs, - Err(error) => { - // We may get an error if the RPC doesn't has the logs for the requested - // block interval. For example, Light Nodes. - warn!("Error when getting logs from L1: {}", error); - vec![] - } - }; - - debug!("Logs: {:#?}", logs); - - // If we have an error adding the tx to the mempool we may assign it to the next - // block to fetch, but we may lose a deposit tx. - self.last_block_fetched = new_last_block; - - Ok(logs) - } - - pub async fn process_logs( - &self, - logs: Vec, - store: &Store, - blockchain: &Blockchain, - ) -> Result, L1WatcherError> { - let mut deposit_txs = Vec::new(); - - for log in logs { - let deposit_data = DepositData::from_log(log.log)?; - - if self - .deposit_already_processed(deposit_data.deposit_tx_hash, store) - .await? - { - warn!( - "Deposit already processed (to: {:x}, value: {:x}, depositId: {:#}), skipping.", - deposit_data.recipient, deposit_data.mint_value, deposit_data.deposit_id - ); - continue; - } - - info!( - "Initiating mint transaction for {:x} with value {:x} and depositId: {:#}", - deposit_data.recipient, deposit_data.mint_value, deposit_data.deposit_id - ); - - let gas_price = self.l2_client.get_gas_price().await?; - // Avoid panicking when using as_u64() - let gas_price: u64 = gas_price - .try_into() - .map_err(|_| L1WatcherError::Custom("Failed at gas_price.try_into()".to_owned()))?; - - let mint_transaction = self - .eth_client - .build_privileged_transaction( - deposit_data.to_address, - deposit_data.recipient, - deposit_data.from, - Bytes::copy_from_slice(&deposit_data.calldata), - Overrides { - chain_id: Some( - store - .get_chain_config() - .map_err(|e| { - L1WatcherError::FailedToRetrieveChainConfig(e.to_string()) - })? - .chain_id, - ), - // Using the deposit_id as nonce. - // If we make a transaction on the L2 with this address, we may break the - // deposit workflow. - nonce: Some(deposit_data.deposit_id.as_u64()), - value: Some(deposit_data.mint_value), - gas_limit: Some(deposit_data.gas_limit.as_u64()), - // TODO(CHECK): Seems that when we start the L2, we need to set the gas. - // Otherwise, the transaction is not included in the mempool. - // We should override the blockchain to always include the transaction. 
- max_fee_per_gas: Some(gas_price), - max_priority_fee_per_gas: Some(gas_price), - ..Default::default() - }, - ) - .await?; - - match blockchain - .add_transaction_to_pool(Transaction::PrivilegedL2Transaction(mint_transaction)) - .await - { - Ok(hash) => { - info!("Mint transaction added to mempool {hash:#x}",); - deposit_txs.push(hash); - } - Err(e) => { - warn!("Failed to add mint transaction to the mempool: {e:#?}"); - // TODO: Figure out if we want to continue or not - continue; - } - } - } - - Ok(deposit_txs) - } - - async fn deposit_already_processed( - &self, - deposit_hash: H256, - store: &Store, - ) -> Result { - if store - .get_transaction_by_hash(deposit_hash) - .await - .map_err(L1WatcherError::FailedAccessingStore)? - .is_some() - { - return Ok(true); - } - - // If we have a reconstructed state, we don't have the transaction in our store. - // Check if the deposit is marked as pending in the contract. - let pending_deposits = self - .eth_client - .get_pending_deposit_logs(self.address) - .await?; - Ok(!pending_deposits.contains(&deposit_hash)) - } -} - -struct DepositData { - pub mint_value: U256, - pub to_address: H160, - pub deposit_id: U256, - pub recipient: H160, - pub from: H160, - pub gas_limit: U256, - pub calldata: Vec, - pub deposit_tx_hash: H256, -} - -impl DepositData { - fn from_log(log: RpcLogInfo) -> Result { - let mint_value = format!( - "{:#x}", - log.topics - .get(1) - .ok_or(L1WatcherError::FailedToDeserializeLog( - "Failed to parse mint value from log: log.topics[1] out of bounds".to_owned() - ))? - ) - .parse::() - .map_err(|e| { - L1WatcherError::FailedToDeserializeLog(format!( - "Failed to parse mint value from log: {e:#?}" - )) - })?; - let to_address_hash = log - .topics - .get(2) - .ok_or(L1WatcherError::FailedToDeserializeLog( - "Failed to parse beneficiary from log: log.topics[2] out of bounds".to_owned(), - ))?; - let to_address = hash_to_address(*to_address_hash); - - let deposit_id = log - .topics - .get(3) - .ok_or(L1WatcherError::FailedToDeserializeLog( - "Failed to parse beneficiary from log: log.topics[3] out of bounds".to_owned(), - ))?; - - let deposit_id = format!("{deposit_id:#x}").parse::().map_err(|e| { - L1WatcherError::FailedToDeserializeLog(format!( - "Failed to parse depositId value from log: {e:#?}" - )) - })?; - - // The previous values are indexed in the topic of the log. Data contains the rest. - // DATA = recipient: Address || from: Address || gas_limit: uint256 || offset_calldata: uint256 || tx_hash: H256 || length_calldata: uint256 || calldata: bytes - // DATA = 0..32 || 32..64 || 64..96 || 96..128 || 128..160 || 160..192 || 192..(192+calldata_len) - // Any value that is not 32 bytes is padded with zeros. 
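The comment above documents the fixed 32-byte slot layout of the `DepositInitiated` data field (recipient, from, gas_limit, calldata offset, tx hash, calldata length, calldata). As a reference for the slicing done just below, here is a minimal, self-contained sketch of that decoding over a raw byte buffer. It is a hedged illustration only: it uses plain std byte arrays instead of `Address`/`H256`/`U256`, ignores the `RpcLogInfo`/`L1WatcherError` types, and the names `DecodedDeposit`/`decode_deposit_data` are invented for the example.

```rust
// Hedged sketch: fixed-offset decoding of the DepositInitiated `data` field, following the
// slot layout documented in the comment above. Addresses sit in the last 20 bytes of their
// 32-byte slot; calldata is assumed to start right after the six fixed slots (offset 192),
// exactly as the deleted DepositData::from_log assumed.

struct DecodedDeposit {
    recipient: [u8; 20],
    from: [u8; 20],
    gas_limit: [u8; 32], // big-endian uint256
    deposit_tx_hash: [u8; 32],
    calldata: Vec<u8>,
}

fn decode_deposit_data(data: &[u8]) -> Option<DecodedDeposit> {
    // Slot i covers bytes [i * 32, (i + 1) * 32).
    let slot = |i: usize| data.get(i * 32..(i + 1) * 32);

    let recipient: [u8; 20] = slot(0)?.get(12..32)?.try_into().ok()?;
    let from: [u8; 20] = slot(1)?.get(12..32)?.try_into().ok()?;
    let gas_limit: [u8; 32] = slot(2)?.try_into().ok()?;
    // Slot 3 holds the ABI offset of the calldata; like the original code, it is not used here.
    let deposit_tx_hash: [u8; 32] = slot(4)?.try_into().ok()?;

    let len_slot: [u8; 32] = slot(5)?.try_into().ok()?;
    // The length must fit in u64, so the 24 high bytes of its slot have to be zero.
    if len_slot[..24].iter().any(|b| *b != 0) {
        return None;
    }
    let calldata_len = u64::from_be_bytes(len_slot[24..].try_into().ok()?) as usize;
    let calldata = data.get(192..192usize.checked_add(calldata_len)?)?.to_vec();

    Some(DecodedDeposit {
        recipient,
        from,
        gas_limit,
        deposit_tx_hash,
        calldata,
    })
}

fn main() {
    // Build a 6-slot buffer plus 4 bytes of calldata and check the recipient round-trips.
    let mut data = vec![0u8; 6 * 32];
    data[12..32].copy_from_slice(&[0xAA; 20]); // recipient in slot 0
    data[6 * 32 - 1] = 4; // calldata length = 4 in slot 5
    data.extend_from_slice(&[1, 2, 3, 4]);

    let decoded = decode_deposit_data(&data).expect("buffer follows the documented layout");
    assert_eq!(decoded.recipient, [0xAA; 20]);
    assert_eq!(decoded.calldata, vec![1, 2, 3, 4]);
}
```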
- - let recipient = log - .data - .get(12..32) - .ok_or(L1WatcherError::FailedToDeserializeLog( - "Failed to parse recipient from log: log.data[0..32] out of bounds".to_owned(), - ))?; - let recipient = Address::from_slice(recipient); - - let from = log - .data - .get(44..64) - .ok_or(L1WatcherError::FailedToDeserializeLog( - "Failed to parse from from log: log.data[44..64] out of bounds".to_owned(), - ))?; - let from = Address::from_slice(from); - - let gas_limit = U256::from_big_endian(log.data.get(64..96).ok_or( - L1WatcherError::FailedToDeserializeLog( - "Failed to parse gas_limit from log: log.data[64..96] out of bounds".to_owned(), - ), - )?); - - let deposit_tx_hash = H256::from_slice( - log.data - .get(128..160) - .ok_or(L1WatcherError::FailedToDeserializeLog( - "Failed to parse deposit_tx_hash from log: log.data[64..96] out of bounds" - .to_owned(), - ))?, - ); - - let calldata_len = U256::from_big_endian(log.data.get(160..192).ok_or( - L1WatcherError::FailedToDeserializeLog( - "Failed to parse calldata_len from log: log.data[96..128] out of bounds".to_owned(), - ), - )?); - let calldata = log - .data - .get(192..192 + calldata_len.as_usize()) - .ok_or(L1WatcherError::FailedToDeserializeLog( - "Failed to parse calldata from log: log.data[128..128 + calldata_len] out of bounds" - .to_owned(), - ))?; - - Ok(Self { - mint_value, - to_address, - deposit_id, - recipient, - from, - gas_limit, - calldata: calldata.to_vec(), - deposit_tx_hash, - }) - } -} diff --git a/crates/l2/sequencer/proof_coordinator.rs b/crates/l2/sequencer/proof_coordinator.rs index af69ec5bda..7db77e0cb9 100644 --- a/crates/l2/sequencer/proof_coordinator.rs +++ b/crates/l2/sequencer/proof_coordinator.rs @@ -357,7 +357,6 @@ impl GenServer for ConnectionHandler { _handle: &GenServerHandle, state: Self::State, ) -> CastResponse { - info!("Receiving message"); match message { ConnInMessage::Connection { stream, addr } => { if let Err(err) = handle_connection(&state, stream).await { @@ -378,48 +377,43 @@ async fn handle_connection( let mut buffer = Vec::new(); // TODO: This should be fixed in https://github.com/lambdaclass/ethrex/issues/3316 // (stream should not be wrapped in an Arc) - match Arc::try_unwrap(stream) { - Err(_) => { - error!("Unable to use stream"); - } - Ok(mut stream) => { - stream.read_to_end(&mut buffer).await?; - - let data: Result = serde_json::from_slice(&buffer); - match data { - Ok(ProofData::BatchRequest { commit_hash }) => { - if let Err(e) = handle_request(state, &mut stream, commit_hash).await { - error!("Failed to handle BatchRequest: {e}"); - } - } - Ok(ProofData::ProofSubmit { - batch_number, - batch_proof, - }) => { - if let Err(e) = - handle_submit(state, &mut stream, batch_number, batch_proof).await - { - error!("Failed to handle ProofSubmit: {e}"); - } - } - Ok(ProofData::ProverSetup { - prover_type, - payload, - }) => { - if let Err(e) = handle_setup(state, &mut stream, prover_type, payload).await { - error!("Failed to handle ProverSetup: {e}"); - } + if let Some(mut stream) = Arc::into_inner(stream) { + stream.read_to_end(&mut buffer).await?; + + let data: Result = serde_json::from_slice(&buffer); + match data { + Ok(ProofData::BatchRequest { commit_hash }) => { + if let Err(e) = handle_request(state, &mut stream, commit_hash).await { + error!("Failed to handle BatchRequest: {e}"); } - Err(e) => { - warn!("Failed to parse request: {e}"); + } + Ok(ProofData::ProofSubmit { + batch_number, + batch_proof, + }) => { + if let Err(e) = handle_submit(state, &mut stream, batch_number, 
batch_proof).await { + error!("Failed to handle ProofSubmit: {e}"); } - _ => { - warn!("Invalid request"); + } + Ok(ProofData::ProverSetup { + prover_type, + payload, + }) => { + if let Err(e) = handle_setup(state, &mut stream, prover_type, payload).await { + error!("Failed to handle ProverSetup: {e}"); } } - debug!("Connection closed"); + Ok(_) => { + warn!("Invalid request"); + } + Err(e) => { + warn!("Failed to parse request: {e}"); + } } - }; + debug!("Connection closed"); + } else { + error!("Unable to use stream"); + } Ok(()) } diff --git a/crates/networking/p2p/discv4/server.rs b/crates/networking/p2p/discv4/server.rs index 5ed76c7553..44d224c99a 100644 --- a/crates/networking/p2p/discv4/server.rs +++ b/crates/networking/p2p/discv4/server.rs @@ -225,7 +225,7 @@ impl Discv4Server { return Ok(()); } - let _ = RLPxConnection::spawn_as_initiator(self.ctx.clone(), &peer.node).await; + RLPxConnection::spawn_as_initiator(self.ctx.clone(), &peer.node).await; Ok(()) } @@ -518,7 +518,7 @@ impl Discv4Server { return Ok(()); } - let _ = RLPxConnection::spawn_as_initiator(self.ctx.clone(), &peer.node).await; + RLPxConnection::spawn_as_initiator(self.ctx.clone(), &peer.node).await; Ok(()) } diff --git a/crates/networking/p2p/rlpx/connection/handshake.rs b/crates/networking/p2p/rlpx/connection/handshake.rs index a26b8f06ca..7764e0cdff 100644 --- a/crates/networking/p2p/rlpx/connection/handshake.rs +++ b/crates/networking/p2p/rlpx/connection/handshake.rs @@ -90,9 +90,8 @@ pub(crate) async fn perform( peer_addr, stream, }) => { - let mut stream = match Arc::try_unwrap(stream) { - Ok(s) => s, - Err(_) => return Err(RLPxError::StateError("Cannot use the stream".to_string())), + let Some(mut stream) = Arc::into_inner(stream) else { + return Err(RLPxError::StateError("Cannot use the stream".to_string())); }; let remote_state = receive_auth(&context.signer, &mut stream).await?; let local_state = send_ack(remote_state.public_key, &mut stream).await?; diff --git a/crates/networking/p2p/rlpx/connection/server.rs b/crates/networking/p2p/rlpx/connection/server.rs index 6974959665..f661d78efd 100644 --- a/crates/networking/p2p/rlpx/connection/server.rs +++ b/crates/networking/p2p/rlpx/connection/server.rs @@ -84,6 +84,8 @@ pub struct Receiver { #[derive(Clone)] pub struct Established { pub(crate) signer: SigningKey, + // Sending part of the TcpStream to connect with the remote peer + // The receiving part is owned by the stream listen loop task pub(crate) sink: Arc, Message>>>, pub(crate) node: Node, pub(crate) storage: Store, @@ -168,17 +170,14 @@ impl RLPxConnection { context: P2PContext, peer_addr: SocketAddr, stream: TcpStream, - ) -> Result { + ) -> RLPxConnectionHandle { let state = RLPxConnectionState::new_as_receiver(context, peer_addr, stream); - Ok(RLPxConnection::start(state)) + RLPxConnection::start(state) } - pub async fn spawn_as_initiator( - context: P2PContext, - node: &Node, - ) -> Result { + pub async fn spawn_as_initiator(context: P2PContext, node: &Node) -> RLPxConnectionHandle { let state = RLPxConnectionState::new_as_initiator(context, node); - Ok(RLPxConnection::start(state.clone())) + RLPxConnection::start(state.clone()) } } diff --git a/crates/networking/p2p/rlpx/p2p.rs b/crates/networking/p2p/rlpx/p2p.rs index d5748e7c7a..f7b754bb80 100644 --- a/crates/networking/p2p/rlpx/p2p.rs +++ b/crates/networking/p2p/rlpx/p2p.rs @@ -269,7 +269,7 @@ impl RLPxMessage for DisconnectMessage { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Copy)] pub(crate) struct PingMessage {} impl 
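The `handle_connection` hunk in proof_coordinator.rs and the `perform` hunk in handshake.rs above both replace the `match Arc::try_unwrap(stream)` pattern with `Arc::into_inner`, paired with `if let Some(...)` and `let ... else` respectively. A minimal standalone sketch of the behavioural difference follows; it assumes a Rust 1.70+ toolchain (where `Arc::into_inner` is stable) and uses none of the ethrex types.

```rust
use std::sync::Arc;

fn take_exclusive<T>(value: Arc<T>) -> Option<T> {
    // Arc::into_inner consumes the Arc and yields the inner value only if this was the last
    // strong reference; otherwise it returns None and this handle's reference is released.
    // Arc::try_unwrap would instead return Err(arc), forcing the caller to handle (or drop)
    // the still-shared Arc, which is the boilerplate the hunks above remove.
    Arc::into_inner(value)
}

fn main() {
    let sole = Arc::new(String::from("only one handle"));
    assert_eq!(take_exclusive(sole).as_deref(), Some("only one handle"));

    let shared = Arc::new(String::from("still shared"));
    let _second_handle = Arc::clone(&shared);
    // A second strong reference exists, so exclusive ownership cannot be taken.
    assert!(take_exclusive(shared).is_none());
}
```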
RLPxMessage for PingMessage {
@@ -295,7 +295,7 @@ impl RLPxMessage for PingMessage {
     }
 }
 
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, Copy)]
 pub(crate) struct PongMessage {}
 
 impl RLPxMessage for PongMessage {

From 0085d4794544e2662240c47e22c5ffa51b7fc9f1 Mon Sep 17 00:00:00 2001
From: Esteban Dimitroff Hodi
Date: Wed, 2 Jul 2025 17:53:50 -0300
Subject: [PATCH 39/40] Fixed wrong merge

---
 crates/l2/based/block_fetcher.rs         | 2 --
 crates/l2/sequencer/errors.rs            | 2 --
 crates/l2/sequencer/proof_coordinator.rs | 1 -
 3 files changed, 5 deletions(-)

diff --git a/crates/l2/based/block_fetcher.rs b/crates/l2/based/block_fetcher.rs
index ec598bd1bd..71448d272b 100644
--- a/crates/l2/based/block_fetcher.rs
+++ b/crates/l2/based/block_fetcher.rs
@@ -57,8 +57,6 @@ pub enum BlockFetcherError {
     #[error("Failed to produce the blob bundle")]
     BlobBundleError,
     #[error("Failed to compute deposit logs hash: {0}")]
-    DepositError(#[from] ethrex_l2_common::deposits::DepositError),
-    #[error("Failed to compute privileged transactions hash: {0}")]
     PrivilegedTransactionError(
         #[from] ethrex_l2_common::privileged_transactions::PrivilegedTransactionError,
     ),
diff --git a/crates/l2/sequencer/errors.rs b/crates/l2/sequencer/errors.rs
index 71ced50fcb..86cba5ff46 100644
--- a/crates/l2/sequencer/errors.rs
+++ b/crates/l2/sequencer/errors.rs
@@ -239,8 +239,6 @@ pub enum CommitterError {
     InternalError(String),
     #[error("Failed to get withdrawals: {0}")]
     FailedToGetWithdrawals(#[from] UtilsError),
-    #[error("Deposit error: {0}")]
-    DepositError(#[from] DepositError),
     #[error("Privileged Transaction error: {0}")]
     PrivilegedTransactionError(#[from] PrivilegedTransactionError),
     // TODO: Avoid propagating GenServerErrors outside GenServer modules
diff --git a/crates/l2/sequencer/proof_coordinator.rs b/crates/l2/sequencer/proof_coordinator.rs
index 7db77e0cb9..0cb5ae2c82 100644
--- a/crates/l2/sequencer/proof_coordinator.rs
+++ b/crates/l2/sequencer/proof_coordinator.rs
@@ -272,7 +272,6 @@ impl GenServer for ProofCoordinator {
         _handle: &GenServerHandle,
         state: Self::State,
     ) -> CastResponse {
-        info!("Receiving message");
         match message {
             ProofCordInMessage::Listen { listener } => {
                 handle_listens(&state, listener).await;

From 5884bc2b20f15e453da808d0f0a1d6da0d9b852c Mon Sep 17 00:00:00 2001
From: Esteban Dimitroff Hodi
Date: Thu, 3 Jul 2025 09:20:37 -0300
Subject: [PATCH 40/40] Removed unused struct

---
 crates/networking/p2p/rlpx/connection/server.rs | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/crates/networking/p2p/rlpx/connection/server.rs b/crates/networking/p2p/rlpx/connection/server.rs
index f661d78efd..51bfd6f066 100644
--- a/crates/networking/p2p/rlpx/connection/server.rs
+++ b/crates/networking/p2p/rlpx/connection/server.rs
@@ -137,9 +137,6 @@ impl RLPxConnectionState {
     }
 }
 
-#[derive(Clone)]
-pub enum CallMessage {}
-
 #[derive(Clone)]
 #[allow(private_interfaces)]
 pub enum CastMessage {
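One detail worth double-checking in the block_fetcher.rs hunk of PATCH 39/40: the removal drops both the `DepositError` variant and the `#[error("Failed to compute privileged transactions hash: {0}")]` attribute, so the surviving `PrivilegedTransactionError` variant is now annotated with the old "Failed to compute deposit logs hash: {0}" message, while the parallel hunk in errors.rs keeps the matching "Privileged Transaction error" text. If that pairing is unintended, the variant would presumably read as in the hedged sketch below (a fragment of the patched enum, not a standalone program; the crate path is taken from the hunk itself).

```rust
// Hypothetical follow-up only: keep the display message aligned with the variant it decorates.
#[error("Failed to compute privileged transactions hash: {0}")]
PrivilegedTransactionError(
    #[from] ethrex_l2_common::privileged_transactions::PrivilegedTransactionError,
),
```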