From b7ab60760af9872bc43c6df2f850a2d9b8b1fde9 Mon Sep 17 00:00:00 2001 From: zclawz Date: Thu, 12 Mar 2026 20:23:16 +0530 Subject: [PATCH 01/24] docs: add Poseidon SSZ hasher documentation (#646) Co-authored-by: zclawz Co-authored-by: Anshal Shukla <53994948+anshalshukla@users.noreply.github.com> --- README.md | 4 +++- resources/poseidon.md | 30 ++++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) create mode 100644 resources/poseidon.md diff --git a/README.md b/README.md index 93af2a03d..5d7000e27 100644 --- a/README.md +++ b/README.md @@ -109,7 +109,7 @@ Zeam is developing and contributing to the Zig Ethereum ecosystem. These librari | Library | Description | |---------|-------------| -| [ssz.zig](https://github.com/blockblaz/ssz.zig) | SSZ serialization with configurable hash function | +| [ssz.zig](https://github.com/blockblaz/ssz.zig) | SSZ serialization with configurable hash function (SHA256 or Poseidon2) | | [zig-snappy](https://github.com/blockblaz/zig-snappy) / [snappyframesz](https://github.com/blockblaz/snappyframesz) | Snappy compression | | [zig-libp2p-pocs](https://github.com/blockblaz/zig-libp2p-pocs) | Zig ↔ Rust libp2p interop | | [hash-sigz](https://github.com/blockblaz/hash-sigz) | Hash-based signature schemes | @@ -143,6 +143,8 @@ To include the git version in the binary: zig build -Doptimize=ReleaseFast -Dgit_version="$(git rev-parse --short HEAD)" ``` + +> To use Poseidon2 as the SSZ hash function instead of SHA256, see [resources/poseidon.md](resources/poseidon.md). ### Running the Prover Demo ```bash diff --git a/resources/poseidon.md b/resources/poseidon.md new file mode 100644 index 000000000..05052664f --- /dev/null +++ b/resources/poseidon.md @@ -0,0 +1,30 @@ +# SSZ Poseidon Hasher + +> ⚠️ Not cryptographically reviewed. Use for development and testing only. + +Zeam supports Poseidon2 as an alternative SSZ hash function, intended for +ZK-friendly state hashing. It is disabled by default. 
+ +## Enabling Poseidon + +Pass `-Duse_poseidon=true` at build time: + +```sh +zig build -Doptimize=ReleaseFast -Dgit_version="$(git rev-parse --short HEAD)" -Duse_poseidon=true +``` + +The default (SHA256) build remains: + +```sh +zig build -Doptimize=ReleaseFast -Dgit_version="$(git rev-parse --short HEAD)" +``` + +## How It Works + +SSZ inputs (arbitrary byte sequences) are packed into KoalaBear field elements +using 24-bit data legs before being passed to the Poseidon2-24 permutation. +This transformation is required to fit generic SSZ byte data into Poseidon's +prime field constraints. + +The Poseidon2 implementation is validated against Plonky3 test vectors for +cross-language parity. From 6faa4b911c72abfb24027b03c48bbba744bdce2d Mon Sep 17 00:00:00 2001 From: zclawz Date: Thu, 12 Mar 2026 21:51:53 +0530 Subject: [PATCH 02/24] docs: add LeanSpec client documentation links in zeam.md (#645) * docs: add LeanSpec client documentation links in zeam.md (closes #337) * Add Lean Consensus Book link to documentation --------- Co-authored-by: zclawz Co-authored-by: Anshal Shukla <53994948+anshalshukla@users.noreply.github.com> Co-authored-by: chethack/Chetany <95150398+chetanyb@users.noreply.github.com> --- resources/zeam.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/resources/zeam.md b/resources/zeam.md index 62cfdb5d5..0c9dad809 100644 --- a/resources/zeam.md +++ b/resources/zeam.md @@ -4,6 +4,17 @@ [Zeam & Beam Wiki](https://github.com/blockblaz/zeam/wiki) +## LeanSpec Client Documentation + +[Lean Consensus Book](https://github.com/leanEthereum/leanSpec/blob/main/lean_consensus.pdf) + +[LeanSpec Client Docs](https://github.com/leanEthereum/leanSpec/tree/main/docs/client) + +- [Chain](https://github.com/leanEthereum/leanSpec/blob/main/docs/client/chain.md) +- [Fork Choice](https://github.com/leanEthereum/leanSpec/blob/main/docs/client/forkchoice.md) +- [Networking](https://github.com/leanEthereum/leanSpec/blob/main/docs/client/networking.md) +- 
[Validator](https://github.com/leanEthereum/leanSpec/blob/main/docs/client/validator.md) + ## Zeam Client POC While the Beam research community is working on Beam specs, we at Zeam are aiming to have a POC of one of From 5becc21ee3dbcd6b9b20794c24cdcbcfebf4ba56 Mon Sep 17 00:00:00 2001 From: zclawz Date: Thu, 12 Mar 2026 22:11:20 +0530 Subject: [PATCH 03/24] fix: handle and fetch missing roots in gossip attestation and aggregated attestation (closes #657) (#659) Co-authored-by: zclawz --- pkgs/node/src/chain.zig | 42 +++++++++++++++++++++++++++++------------ pkgs/node/src/node.zig | 11 +++++------ 2 files changed, 35 insertions(+), 18 deletions(-) diff --git a/pkgs/node/src/chain.zig b/pkgs/node/src/chain.zig index 92c728c6d..96249825b 100644 --- a/pkgs/node/src/chain.zig +++ b/pkgs/node/src/chain.zig @@ -739,10 +739,21 @@ pub const BeamChain = struct { // Validate attestation before processing (gossip = not from block) self.validateAttestationData(signed_attestation.message.message, false) catch |err| { zeam_metrics.metrics.lean_attestations_invalid_total.incr(.{ .source = "gossip" }) catch {}; - // Propagate unknown block errors to node.zig for context-aware logging - // (downgrade to debug when the missing block is already being fetched) switch (err) { - error.UnknownHeadBlock, error.UnknownSourceBlock, error.UnknownTargetBlock => return err, + error.UnknownHeadBlock, error.UnknownSourceBlock, error.UnknownTargetBlock => { + // Add the missing root to the result so node's onGossip can enqueue it for fetching + const att_data = signed_attestation.message.message; + const missing_root = if (err == error.UnknownHeadBlock) + att_data.head.root + else if (err == error.UnknownSourceBlock) + att_data.source.root + else + att_data.target.root; + var roots: std.ArrayListUnmanaged(types.Root) = .empty; + errdefer roots.deinit(self.allocator); + try roots.append(self.allocator, missing_root); + return .{ .missing_attestation_roots = try roots.toOwnedSlice(self.allocator) 
}; + }, else => { self.logger.warn("gossip attestation validation failed: {any}", .{err}); return .{}; @@ -775,7 +786,20 @@ pub const BeamChain = struct { self.validateAttestationData(signed_aggregation.data, false) catch |err| { zeam_metrics.metrics.lean_attestations_invalid_total.incr(.{ .source = "aggregation" }) catch {}; switch (err) { - error.UnknownHeadBlock, error.UnknownSourceBlock, error.UnknownTargetBlock => return err, + error.UnknownHeadBlock, error.UnknownSourceBlock, error.UnknownTargetBlock => { + // Add the missing root to the result so node's onGossip can enqueue it for fetching + const att_data = signed_aggregation.data; + const missing_root = if (err == error.UnknownHeadBlock) + att_data.head.root + else if (err == error.UnknownSourceBlock) + att_data.source.root + else + att_data.target.root; + var roots: std.ArrayListUnmanaged(types.Root) = .empty; + errdefer roots.deinit(self.allocator); + try roots.append(self.allocator, missing_root); + return .{ .missing_attestation_roots = try roots.toOwnedSlice(self.allocator) }; + }, else => { self.logger.warn("gossip aggregation validation failed: {any}", .{err}); return .{}; @@ -785,14 +809,8 @@ pub const BeamChain = struct { self.onGossipAggregatedAttestation(signed_aggregation) catch |err| { zeam_metrics.metrics.lean_attestations_invalid_total.incr(.{ .source = "aggregation" }) catch {}; - switch (err) { - // Propagate unknown block errors to node.zig for context-aware logging - error.UnknownHeadBlock, error.UnknownSourceBlock, error.UnknownTargetBlock => return err, - else => { - self.logger.warn("gossip aggregation processing error: {any}", .{err}); - return .{}; - }, - } + self.logger.warn("gossip aggregation processing error: {any}", .{err}); + return .{}; }; zeam_metrics.metrics.lean_attestations_valid_total.incr(.{ .source = "aggregation" }) catch {}; return .{}; diff --git a/pkgs/node/src/node.zig b/pkgs/node/src/node.zig index 230860d30..1856bc55a 100644 --- a/pkgs/node/src/node.zig +++ 
b/pkgs/node/src/node.zig @@ -321,16 +321,15 @@ pub const BeamNode = struct { self.processCachedDescendants(processed_root); } - // Fetch any attestation head roots that were missing while processing the block. - // We only own the slice when the block was actually processed (onBlock allocates it). + // Fetch any block roots that were missing while processing a block or validating attestation/aggregation gossip. + // We own the slice whenever it's non-empty (onBlock and onGossip both allocate it). const missing_roots = result.missing_attestation_roots; - const owns_missing_roots = result.processed_block_root != null; - defer if (owns_missing_roots) self.allocator.free(missing_roots); + defer if (missing_roots.len > 0) self.allocator.free(missing_roots); - if (missing_roots.len > 0 and owns_missing_roots) { + if (missing_roots.len > 0) { self.fetchBlockByRoots(missing_roots, 0) catch |err| { self.logger.warn( - "failed to fetch {d} missing attestation head block(s) from gossip: {any}", + "failed to fetch {d} missing block root(s) from gossip: {any}", .{ missing_roots.len, err }, ); }; From 68ee3d1647e9ef12d64652fcd6d4e80a672baa22 Mon Sep 17 00:00:00 2001 From: zclawz Date: Thu, 12 Mar 2026 23:41:12 +0530 Subject: [PATCH 04/24] docs: add proxy troubleshooting note for EndOfStream build errors (#660) Co-authored-by: zclawz Co-authored-by: lilhammerfun --- README.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/README.md b/README.md index 5d7000e27..3e0c42a07 100644 --- a/README.md +++ b/README.md @@ -174,6 +174,24 @@ docker build -f Dockerfile.prebuilt \ -t blockblaz/zeam:latest . ``` +### Troubleshooting + +**Build fails with `EndOfStream` error** + +If you encounter errors like: +``` +error: invalid HTTP response: EndOfStream +``` + +This may be caused by proxy environment variables interfering with Zig's HTTP client (related to the [Zig HTTP connection pool bug](https://github.com/ziglang/zig/issues/21316) mentioned above). 
+ +Try building without proxy settings: +```bash +env -u https_proxy -u HTTPS_PROXY -u http_proxy -u HTTP_PROXY \ + -u all_proxy -u ALL_PROXY -u no_proxy -u NO_PROXY \ + zig build -Doptimize=ReleaseFast +``` + --- ## Running a Local Devnet From 4c2d69f0b89ac7216d5347f00ef8948e8310700d Mon Sep 17 00:00:00 2001 From: anshalshuklabot Date: Thu, 12 Mar 2026 23:48:39 +0530 Subject: [PATCH 05/24] pre-generated test keys for faster CI (#587) * feat: pre-generated test keys for faster CI - Add keygen command to zeam-tools for generating XMSS key pairs - Add test-keys submodule (anshalshuklabot/zeam-test-keys) with 32 pre-generated keys - Update getTestKeyManager to load pre-generated keys from disk (near-instant) - Falls back to runtime generation if test-keys submodule not initialized - build.zig: add xmss and types imports to tools target Each XMSS key takes ~1 min to generate at runtime. Loading 32 pre-generated keys from SSZ files is near-instant, significantly speeding up CI and tests. * fix: improve pre-generated key loading - Search upward for test-keys/ directory (handles test runners in subdirs) - Load pre-generated keys for first 32 validators, generate rest at runtime - If 40 validators needed: loads 32 from disk + generates 8 at runtime - Fixes CI failure where keys weren't found * ci: add submodules: recursive to all checkout steps Ensures test-keys submodule is available in every CI job. Fixes CACHE MISS in Dummy prove jobs where pre-generated keys weren't found. * fix: add Rust build dependency to tools target tools now imports @zeam/xmss which requires libhashsig_glue.a. Without this dependency, the tools binary and tests fail to link on CI where Rust libs aren't pre-built in the cache. * fix: use addRustGlueLib for tools target to link Rust static libs The tools executable was missing proper Rust library linking (libhashsig_glue.a, libmultisig_glue.a, liblibp2p_glue.a) and platform-specific frameworks (macOS). 
Replace manual step.dependOn with addRustGlueLib() which handles object files, linkLibC, linkSystemLibrary(unwind), and macOS frameworks. * fix: add Rust build step dependency for tools target addRustGlueLib only adds object files and link flags but not the step dependency that ensures Rust libs are built first. Both dependOn AND addRustGlueLib are needed, matching how all other targets are wired. * fix: resolve test-keys path via build.zig absolute path The relative path search for test-keys/hash-sig-keys failed because Zig test binaries run from cache directories, not the repo root. Fix by injecting the absolute repo path as a build option (test_keys_path) from build.zig using pathFromRoot(), and add build_options import to key-manager module. All 5 build commands verified locally: - zig build all - zig build test - zig build simtest - zig build spectest:generate - zig build spectest * fix: handle partial preload failures and fix keypair memory leak Two fixes: 1. Track actually_loaded count instead of assuming all num_preloaded keys were inserted. On partial failure (corrupt SSZ, addKeypair error), the fallback loop now starts from the correct index, so no validators are left without keys. 2. Replace boolean owns_keypairs with num_owned_keypairs counter. Pre- generated keys (index < num_owned_keypairs) are freed on deinit while cached runtime keys (index >= num_owned_keypairs) are skipped. This prevents leaking ~20MB private keys when the mixed ownership path is taken. * fix: track per-key ownership to prevent leaks in CLI and mixed paths Replace index-threshold ownership check with a per-key owned_keys hashmap. addKeypair() marks keys as owned (freed on deinit), addCachedKeypair() does not (for borrowed/cached runtime keys). This fixes the regression where CLI-loaded keys (arbitrary validator indices) were never freed because num_owned_keypairs stayed 0. Now any caller using addKeypair() gets correct cleanup regardless of index order. 
* chore: remove plans folder * style: zig fmt * chore: update test-keys submodule URL to blockblaz org --------- Co-authored-by: anshalshuklabot Co-authored-by: Anshal Shukla <53994948+anshalshukla@users.noreply.github.com> --- .github/workflows/ci.yml | 9 +++ .gitmodules | 3 + build.zig | 11 +++ pkgs/key-manager/src/lib.zig | 118 +++++++++++++++++++++++++---- pkgs/tools/src/main.zig | 143 +++++++++++++++++++++++++++++++++++ test-keys | 1 + 6 files changed, 271 insertions(+), 14 deletions(-) create mode 160000 test-keys diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d08ff6728..88aba60e7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -18,6 +18,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + with: + submodules: recursive - name: Set up Zig uses: mlugg/setup-zig@v2.0.5 @@ -80,6 +82,8 @@ jobs: os: [ubuntu-latest, macos-latest] steps: - uses: actions/checkout@v4 + with: + submodules: recursive - name: Free disk space (Ubuntu) if: runner.os == 'Linux' @@ -149,6 +153,8 @@ jobs: os: [ubuntu-latest, macos-latest] steps: - uses: actions/checkout@v4 + with: + submodules: recursive - name: Free disk space (Ubuntu) if: runner.os == 'Linux' @@ -318,6 +324,8 @@ jobs: os: [ubuntu-latest, macos-latest] steps: - uses: actions/checkout@v4 + with: + submodules: recursive - name: Free disk space (Ubuntu) if: runner.os == 'Linux' @@ -388,6 +396,7 @@ jobs: steps: - uses: actions/checkout@v4 with: + submodules: recursive fetch-depth: 0 # Fetch full history to get git commit info - name: Free disk space (Ubuntu) diff --git a/.gitmodules b/.gitmodules index fbc272db1..0c25c3085 100644 --- a/.gitmodules +++ b/.gitmodules @@ -4,3 +4,6 @@ [submodule "leanSpec"] path = leanSpec url = https://github.com/leanEthereum/leanSpec +[submodule "test-keys"] + path = test-keys + url = https://github.com/blockblaz/zeam-test-keys.git diff --git a/build.zig b/build.zig index 43e2da5a6..588b0450e 100644 --- a/build.zig +++ b/build.zig @@ 
-156,6 +156,8 @@ pub fn build(b: *Builder) !void { build_options.addOption(bool, "has_openvm", prover == .openvm or prover == .all); const use_poseidon = b.option(bool, "use_poseidon", "Use Poseidon SSZ hasher (default: false)") orelse false; build_options.addOption(bool, "use_poseidon", use_poseidon); + // Absolute path to test-keys for pre-generated validator keys + build_options.addOption([]const u8, "test_keys_path", b.pathFromRoot("test-keys/hash-sig-keys")); const build_options_module = build_options.createModule(); // add zeam-utils @@ -240,6 +242,7 @@ pub fn build(b: *Builder) !void { .target = target, .optimize = optimize, }); + zeam_key_manager.addImport("build_options", build_options_module); zeam_key_manager.addImport("@zeam/xmss", zeam_xmss); zeam_key_manager.addImport("@zeam/types", zeam_types); zeam_key_manager.addImport("@zeam/utils", zeam_utils); @@ -421,6 +424,10 @@ pub fn build(b: *Builder) !void { tools_cli_exe.root_module.addImport("enr", enr); tools_cli_exe.root_module.addImport("build_options", build_options_module); tools_cli_exe.root_module.addImport("simargs", simargs); + tools_cli_exe.root_module.addImport("@zeam/xmss", zeam_xmss); + tools_cli_exe.root_module.addImport("@zeam/types", zeam_types); + tools_cli_exe.step.dependOn(&build_rust_lib_steps.step); + addRustGlueLib(b, tools_cli_exe, target, prover); const install_tools_cli = b.addInstallArtifact(tools_cli_exe, .{}); tools_step.dependOn(&install_tools_cli.step); @@ -602,6 +609,10 @@ pub fn build(b: *Builder) !void { .root_module = tools_cli_exe.root_module, }); tools_cli_tests.root_module.addImport("enr", enr); + tools_cli_tests.root_module.addImport("@zeam/xmss", zeam_xmss); + tools_cli_tests.root_module.addImport("@zeam/types", zeam_types); + tools_cli_tests.step.dependOn(&build_rust_lib_steps.step); + addRustGlueLib(b, tools_cli_tests, target, prover); const run_tools_cli_test = b.addRunArtifact(tools_cli_tests); setTestRunLabelFromCompile(b, run_tools_cli_test, tools_cli_tests); 
tools_test_step.dependOn(&run_tools_cli_test.step); diff --git a/pkgs/key-manager/src/lib.zig b/pkgs/key-manager/src/lib.zig index ce3e933d3..33c7f326b 100644 --- a/pkgs/key-manager/src/lib.zig +++ b/pkgs/key-manager/src/lib.zig @@ -55,7 +55,8 @@ fn getOrCreateCachedKeyPair( pub const KeyManager = struct { keys: std.AutoHashMap(usize, xmss.KeyPair), allocator: Allocator, - owns_keypairs: bool, + /// Tracks which keypairs are owned (allocated by us) vs borrowed (cached). + owned_keys: std.AutoHashMap(usize, void), const Self = @This(); @@ -63,22 +64,30 @@ pub const KeyManager = struct { return Self{ .keys = std.AutoHashMap(usize, xmss.KeyPair).init(allocator), .allocator = allocator, - .owns_keypairs = true, + .owned_keys = std.AutoHashMap(usize, void).init(allocator), }; } pub fn deinit(self: *Self) void { - if (self.owns_keypairs) { - var it = self.keys.iterator(); - while (it.next()) |entry| { + var it = self.keys.iterator(); + while (it.next()) |entry| { + if (self.owned_keys.contains(entry.key_ptr.*)) { entry.value_ptr.deinit(); } } self.keys.deinit(); + self.owned_keys.deinit(); } + /// Add an owned keypair that will be freed on deinit. pub fn addKeypair(self: *Self, validator_id: usize, keypair: xmss.KeyPair) !void { try self.keys.put(validator_id, keypair); + try self.owned_keys.put(validator_id, {}); + } + + /// Add a cached/borrowed keypair that will NOT be freed on deinit. + pub fn addCachedKeypair(self: *Self, validator_id: usize, keypair: xmss.KeyPair) !void { + try self.keys.put(validator_id, keypair); } pub fn loadFromKeypairDir(_: *Self, _: []const u8) !void { @@ -162,24 +171,105 @@ pub const KeyManager = struct { } }; +/// Maximum size of a serialized XMSS private key (20MB). +const MAX_SK_SIZE = 1024 * 1024 * 20; + +/// Maximum size of a serialized XMSS public key (256 bytes). +const MAX_PK_SIZE = 256; + +/// Number of pre-generated test keys available in the test-keys submodule. 
+const NUM_PREGENERATED_KEYS: usize = 32; + +const build_options = @import("build_options"); + +/// Find the test-keys directory using the repo root path injected by build.zig. +fn findTestKeysDir() ?[]const u8 { + const keys_path = build_options.test_keys_path; + if (keys_path.len == 0) return null; + + // Verify it actually exists at runtime + if (std.fs.cwd().openDir(keys_path, .{})) |dir| { + var d = dir; + d.close(); + return keys_path; + } else |_| {} + + return null; +} + +/// Load a single pre-generated key pair from SSZ files on disk. +fn loadPreGeneratedKey( + allocator: Allocator, + keys_dir: []const u8, + index: usize, +) !xmss.KeyPair { + // Build file paths + var sk_path_buf: [512]u8 = undefined; + const sk_path = std.fmt.bufPrint(&sk_path_buf, "{s}/validator_{d}_sk.ssz", .{ keys_dir, index }) catch unreachable; + + var pk_path_buf: [512]u8 = undefined; + const pk_path = std.fmt.bufPrint(&pk_path_buf, "{s}/validator_{d}_pk.ssz", .{ keys_dir, index }) catch unreachable; + + // Read private key + var sk_file = try std.fs.cwd().openFile(sk_path, .{}); + defer sk_file.close(); + const sk_data = try sk_file.readToEndAlloc(allocator, MAX_SK_SIZE); + defer allocator.free(sk_data); + + // Read public key + var pk_file = try std.fs.cwd().openFile(pk_path, .{}); + defer pk_file.close(); + const pk_data = try pk_file.readToEndAlloc(allocator, MAX_PK_SIZE); + defer allocator.free(pk_data); + + // Reconstruct keypair from SSZ + return xmss.KeyPair.fromSsz(allocator, sk_data, pk_data); +} + pub fn getTestKeyManager( allocator: Allocator, num_validators: usize, max_slot: usize, ) !KeyManager { var key_manager = KeyManager.init(allocator); - key_manager.owns_keypairs = false; errdefer key_manager.deinit(); - var num_active_epochs = max_slot + 1; - // to reuse cached keypairs, gen for 10 since most tests ask for < 10 max slot including - // building mock chain for tests. 
otherwise getOrCreateCachedKeyPair might cleanup previous - // key generated for smaller life time - if (num_active_epochs < 10) num_active_epochs = 10; + // Determine how many keys we can load from pre-generated files + const keys_dir = findTestKeysDir(); + const num_preloaded = if (keys_dir != null) + @min(num_validators, NUM_PREGENERATED_KEYS) + else + 0; + + // Load pre-generated keys (fast path: near-instant from SSZ files) + var actually_loaded: usize = 0; + if (keys_dir) |dir| { + for (0..num_preloaded) |i| { + const keypair = loadPreGeneratedKey(allocator, dir, i) catch |err| { + std.debug.print("Failed to load pre-generated key {d}: {}\n", .{ i, err }); + break; + }; + key_manager.addKeypair(i, keypair) catch |err| { + std.debug.print("Failed to add pre-generated key {d}: {}\n", .{ i, err }); + break; + }; + actually_loaded += 1; + } + std.debug.print("Loaded {d} pre-generated test keys from {s}\n", .{ actually_loaded, dir }); + } else { + std.debug.print("Pre-generated keys not found, generating all keys at runtime\n", .{}); + } + + // Generate remaining keys at runtime (for validators beyond the loaded set) + if (num_validators > actually_loaded) { + var num_active_epochs = max_slot + 1; + if (num_active_epochs < 10) num_active_epochs = 10; - for (0..num_validators) |i| { - const keypair = try getOrCreateCachedKeyPair(i, num_active_epochs); - try key_manager.addKeypair(i, keypair); + for (actually_loaded..num_validators) |i| { + const keypair = try getOrCreateCachedKeyPair(i, num_active_epochs); + try key_manager.addCachedKeypair(i, keypair); + } + std.debug.print("Generated {d} additional keys at runtime\n", .{num_validators - actually_loaded}); } return key_manager; diff --git a/pkgs/tools/src/main.zig b/pkgs/tools/src/main.zig index 44d52fd66..43b9c25f9 100644 --- a/pkgs/tools/src/main.zig +++ b/pkgs/tools/src/main.zig @@ -2,6 +2,7 @@ const std = @import("std"); const enr = @import("enr"); const build_options = @import("build_options"); const simargs 
= @import("simargs"); +const xmss = @import("@zeam/xmss"); pub const max_enr_txt_size = enr.max_enr_txt_size; @@ -11,9 +12,11 @@ const ToolsArgs = struct { __commands__: union(enum) { enrgen: ENRGenCmd, + keygen: KeyGenCmd, pub const __messages__ = .{ .enrgen = "Generate a new ENR (Ethereum Node Record)", + .keygen = "Generate pre-computed XMSS test validator keys", }; }, @@ -50,6 +53,27 @@ const ToolsArgs = struct { .help = "Show help information for the enrgen command", }; }; + + const KeyGenCmd = struct { + @"num-validators": usize = 32, + @"num-active-epochs": usize = 1000, + @"output-dir": []const u8 = "test-keys", + help: bool = false, + + pub const __shorts__ = .{ + .@"num-validators" = .n, + .@"num-active-epochs" = .e, + .@"output-dir" = .o, + .help = .h, + }; + + pub const __messages__ = .{ + .@"num-validators" = "Number of validator key pairs to generate (default: 32)", + .@"num-active-epochs" = "Number of active epochs for each key (default: 1000)", + .@"output-dir" = "Output directory for generated keys (default: test-keys)", + .help = "Show help information for the keygen command", + }; + }; }; pub fn main() !void { @@ -86,6 +110,12 @@ pub fn main() !void { defer enr.deinitGlobalSecp256k1Ctx(); switch (opts.args.__commands__) { + .keygen => |cmd| { + handleKeyGen(allocator, cmd) catch |err| { + std.debug.print("Error generating keys: {}\n", .{err}); + std.process.exit(1); + }; + }, .enrgen => |cmd| { handleENRGen(cmd) catch |err| switch (err) { error.EmptySecretKey => { @@ -113,6 +143,119 @@ pub fn main() !void { } } +fn handleKeyGen(allocator: std.mem.Allocator, cmd: ToolsArgs.KeyGenCmd) !void { + const num_validators = cmd.@"num-validators"; + const num_active_epochs = cmd.@"num-active-epochs"; + const output_dir = cmd.@"output-dir"; + + std.debug.print("Generating {d} validator keys with {d} active epochs...\n", .{ num_validators, num_active_epochs }); + std.debug.print("Output directory: {s}\n", .{output_dir}); + + // Create output directories + 
const hash_sig_dir = try std.fmt.allocPrint(allocator, "{s}/hash-sig-keys", .{output_dir}); + defer allocator.free(hash_sig_dir); + + std.fs.cwd().makePath(hash_sig_dir) catch |err| { + std.debug.print("Error creating directory {s}: {}\n", .{ hash_sig_dir, err }); + return err; + }; + + // Allocate buffers for serialization + // Private keys can be very large (~5-10MB for XMSS) + const sk_buffer = try allocator.alloc(u8, 1024 * 1024 * 20); // 20MB + defer allocator.free(sk_buffer); + var pk_buffer: [256]u8 = undefined; + + // Open manifest file + const manifest_path = try std.fmt.allocPrint(allocator, "{s}/validator-keys-manifest.yaml", .{output_dir}); + defer allocator.free(manifest_path); + const manifest_file = try std.fs.cwd().createFile(manifest_path, .{}); + defer manifest_file.close(); + + var manifest_buf: [4096]u8 = undefined; + var manifest_writer = manifest_file.writer(&manifest_buf); + + // Write manifest header + try manifest_writer.interface.print( + \\key_scheme: SIGTopLevelTargetSumLifetime32Dim64Base8 + \\hash_function: Poseidon2 + \\encoding: TargetSum + \\lifetime: {d} + \\num_active_epochs: {d} + \\num_validators: {d} + \\validators: + \\ + , .{ num_active_epochs, num_active_epochs, num_validators }); + + for (0..num_validators) |i| { + std.debug.print(" Generating validator {d}/{d}...\n", .{ i + 1, num_validators }); + + // Generate keypair with deterministic seed + const seed = try std.fmt.allocPrint(allocator, "test_validator_{d}", .{i}); + defer allocator.free(seed); + + var keypair = try xmss.KeyPair.generate(allocator, seed, 0, num_active_epochs); + defer keypair.deinit(); + + // Serialize public key + const pk_len = try keypair.pubkeyToBytes(&pk_buffer); + + // Serialize private key + const sk_len = try keypair.privkeyToBytes(sk_buffer); + + std.debug.print(" PK size: {d} bytes, SK size: {d} bytes\n", .{ pk_len, sk_len }); + + // Write private key file + const sk_filename = try std.fmt.allocPrint(allocator, "validator_{d}_sk.ssz", .{i}); 
+ defer allocator.free(sk_filename); + const sk_path = try std.fmt.allocPrint(allocator, "{s}/{s}", .{ hash_sig_dir, sk_filename }); + defer allocator.free(sk_path); + + const sk_file = try std.fs.cwd().createFile(sk_path, .{}); + defer sk_file.close(); + var sk_write_buf: [65536]u8 = undefined; + var sk_writer = sk_file.writer(&sk_write_buf); + try sk_writer.interface.writeAll(sk_buffer[0..sk_len]); + try sk_writer.interface.flush(); + + // Write public key file + const pk_filename = try std.fmt.allocPrint(allocator, "validator_{d}_pk.ssz", .{i}); + defer allocator.free(pk_filename); + const pk_path = try std.fmt.allocPrint(allocator, "{s}/{s}", .{ hash_sig_dir, pk_filename }); + defer allocator.free(pk_path); + + const pk_file = try std.fs.cwd().createFile(pk_path, .{}); + defer pk_file.close(); + var pk_write_buf: [4096]u8 = undefined; + var pk_writer = pk_file.writer(&pk_write_buf); + try pk_writer.interface.writeAll(pk_buffer[0..pk_len]); + try pk_writer.interface.flush(); + + // Write manifest entry with pubkey as hex + // Format pubkey bytes as hex string + var hex_buf: [512]u8 = undefined; + const hex_len = pk_len * 2; + for (pk_buffer[0..pk_len], 0..) |byte, j| { + const high = byte >> 4; + const low = byte & 0x0f; + hex_buf[j * 2] = if (high < 10) '0' + high else 'a' + high - 10; + hex_buf[j * 2 + 1] = if (low < 10) '0' + low else 'a' + low - 10; + } + + try manifest_writer.interface.print( + \\- index: {d} + \\ pubkey_hex: "0x{s}" + \\ privkey_file: {s} + \\ + , .{ i, hex_buf[0..hex_len], sk_filename }); + } + + try manifest_writer.interface.flush(); + + std.debug.print("\nDone! 
Generated {d} keys in {s}/\n", .{ num_validators, output_dir }); + std.debug.print("Manifest written to {s}\n", .{manifest_path}); +} + fn handleENRGen(cmd: ToolsArgs.ENRGenCmd) !void { if (cmd.sk.len == 0) { return error.EmptySecretKey; diff --git a/test-keys b/test-keys new file mode 160000 index 000000000..0b645ebd2 --- /dev/null +++ b/test-keys @@ -0,0 +1 @@ +Subproject commit 0b645ebd2302636330689de12afe3e4e8dfde3df From 0d0a98940dcdca1d6e945b80a69a1d27e5664098 Mon Sep 17 00:00:00 2001 From: zclawz Date: Fri, 13 Mar 2026 12:10:55 +0530 Subject: [PATCH 06/24] fix: consolidate key loading logic into key-manager (#662) * fix: consolidate key loading logic into key-manager (closes #661) Extract a shared loadKeypairFromFiles() helper in the key-manager package and use it across all callers: - pkgs/key-manager/src/lib.zig: add public loadKeypairFromFiles(allocator, sk_path, pk_path) and expose MAX_SK_SIZE / MAX_PK_SIZE; refactor loadPreGeneratedKey to delegate to it - pkgs/cli/src/node.zig: replace duplicated open/read/fromSsz sequence in loadHashSigKeys() with key_manager_lib.loadKeypairFromFiles(); remove now-unused xmss and constants imports - pkgs/cli/src/main.zig: replace duplicated open/read/fromSsz sequence in the testsig command with key_manager_lib.loadKeypairFromFiles(); hoist key_manager_lib import to file scope and remove inner-scope duplicate * fix: preserve distinct HashSigSecretKeyMissing/HashSigPublicKeyMissing errors in loadKeypairFromFiles --------- Co-authored-by: zclawz Co-authored-by: zclawz --- pkgs/cli/src/main.zig | 28 +++----------------- pkgs/cli/src/node.zig | 26 +++---------------- pkgs/key-manager/src/lib.zig | 50 ++++++++++++++++++++++++------------ 3 files changed, 40 insertions(+), 64 deletions(-) diff --git a/pkgs/cli/src/main.zig b/pkgs/cli/src/main.zig index 4b330bcf3..8ffc93e12 100644 --- a/pkgs/cli/src/main.zig +++ b/pkgs/cli/src/main.zig @@ -28,6 +28,7 @@ const Chain = configs.Chain; const ChainOptions = configs.ChainOptions; 
const utils_lib = @import("@zeam/utils"); +const key_manager_lib = @import("@zeam/key-manager"); const zeam_metrics = @import("@zeam/metrics"); const database = @import("@zeam/database"); @@ -396,7 +397,6 @@ fn mainInner() !void { var chain_options = (try json.parseFromSlice(ChainOptions, gpa.allocator(), chain_spec, options)).value; // Create key manager FIRST to get validator pubkeys for genesis - const key_manager_lib = @import("@zeam/key-manager"); // Using 3 validators for 3-node setup with initial sync testing // Nodes 1,2 start immediately; Node 3 starts after finalization to test sync const num_validators: usize = 3; @@ -794,30 +794,8 @@ fn mainInner() !void { }; defer allocator.free(sk_path); - const pk_file = std.fs.cwd().openFile(key_path, .{}) catch |err| { - ErrorHandler.logErrorWithDetails(err, "open public key file", .{ .path = key_path }); - return err; - }; - defer pk_file.close(); - const pk_bytes = pk_file.readToEndAlloc(allocator, 256) catch |err| { - ErrorHandler.logErrorWithOperation(err, "read public key file"); - return err; - }; - defer allocator.free(pk_bytes); - - const sk_file = std.fs.cwd().openFile(sk_path, .{}) catch |err| { - ErrorHandler.logErrorWithDetails(err, "open private key file", .{ .path = sk_path }); - return err; - }; - defer sk_file.close(); - const sk_bytes = sk_file.readToEndAlloc(allocator, 16 * 1024 * 1024) catch |err| { - ErrorHandler.logErrorWithOperation(err, "read private key file"); - return err; - }; - defer allocator.free(sk_bytes); - - keypair = xmss.KeyPair.fromSsz(allocator, sk_bytes, pk_bytes) catch |err| { - ErrorHandler.logErrorWithOperation(err, "load keypair from SSZ"); + keypair = key_manager_lib.loadKeypairFromFiles(allocator, sk_path, key_path) catch |err| { + ErrorHandler.logErrorWithOperation(err, "load keypair from SSZ files"); return err; }; } else if (cmd.@"private-key") |seed| { diff --git a/pkgs/cli/src/node.zig b/pkgs/cli/src/node.zig index fef60d528..a808a469a 100644 --- 
a/pkgs/cli/src/node.zig +++ b/pkgs/cli/src/node.zig @@ -17,14 +17,12 @@ const networks = @import("@zeam/network"); const Multiaddr = @import("multiaddr").Multiaddr; const node_lib = @import("@zeam/node"); const key_manager_lib = @import("@zeam/key-manager"); -const xmss = @import("@zeam/xmss"); const Clock = node_lib.Clock; const BeamNode = node_lib.BeamNode; const types = @import("@zeam/types"); const LoggerConfig = utils_lib.ZeamLoggerConfig; const NodeCommand = @import("main.zig").NodeCommand; const zeam_utils = @import("@zeam/utils"); -const constants = @import("constants.zig"); const database = @import("@zeam/database"); const json = std.json; const utils = @import("@zeam/utils"); @@ -452,29 +450,11 @@ pub const Node = struct { const pk_path = try std.fmt.allocPrint(self.allocator, "{s}/{s}_pk.ssz", .{ hash_sig_key_dir, base }); defer self.allocator.free(pk_path); - // Read secret key - var sk_file = std.fs.cwd().openFile(sk_path, .{}) catch |err| switch (err) { - error.FileNotFound => return error.HashSigSecretKeyMissing, + var keypair = key_manager_lib.loadKeypairFromFiles(self.allocator, sk_path, pk_path) catch |err| switch (err) { + error.SecretKeyFileNotFound => return error.HashSigSecretKeyMissing, + error.PublicKeyFileNotFound => return error.HashSigPublicKeyMissing, else => return err, }; - defer sk_file.close(); - const secret_ssz = try sk_file.readToEndAlloc(self.allocator, constants.MAX_HASH_SIG_ENCODED_KEY_SIZE); - defer self.allocator.free(secret_ssz); - - // Read public key - var pk_file = std.fs.cwd().openFile(pk_path, .{}) catch |err| switch (err) { - error.FileNotFound => return error.HashSigPublicKeyMissing, - else => return err, - }; - defer pk_file.close(); - const public_ssz = try pk_file.readToEndAlloc(self.allocator, constants.MAX_HASH_SIG_ENCODED_KEY_SIZE); - defer self.allocator.free(public_ssz); - - var keypair = try xmss.KeyPair.fromSsz( - self.allocator, - secret_ssz, - public_ssz, - ); errdefer keypair.deinit(); try 
self.key_manager.addKeypair(assignment.index, keypair); diff --git a/pkgs/key-manager/src/lib.zig b/pkgs/key-manager/src/lib.zig index 33c7f326b..e1a961d21 100644 --- a/pkgs/key-manager/src/lib.zig +++ b/pkgs/key-manager/src/lib.zig @@ -172,10 +172,41 @@ pub const KeyManager = struct { }; /// Maximum size of a serialized XMSS private key (20MB). -const MAX_SK_SIZE = 1024 * 1024 * 20; +pub const MAX_SK_SIZE = 1024 * 1024 * 20; /// Maximum size of a serialized XMSS public key (256 bytes). -const MAX_PK_SIZE = 256; +pub const MAX_PK_SIZE = 256; + +/// Load an XMSS keypair from SSZ files on disk. +/// +/// `sk_path` must point to the secret key SSZ file (`*_sk.ssz`). +/// `pk_path` must point to the public key SSZ file (`*_pk.ssz`). +/// +/// Returns a fully initialised `xmss.KeyPair`. The caller owns the keypair +/// and must call `keypair.deinit()` when it is no longer needed. +pub fn loadKeypairFromFiles( + allocator: Allocator, + sk_path: []const u8, + pk_path: []const u8, +) !xmss.KeyPair { + var sk_file = std.fs.cwd().openFile(sk_path, .{}) catch |err| switch (err) { + error.FileNotFound => return error.SecretKeyFileNotFound, + else => return err, + }; + defer sk_file.close(); + const sk_data = try sk_file.readToEndAlloc(allocator, MAX_SK_SIZE); + defer allocator.free(sk_data); + + var pk_file = std.fs.cwd().openFile(pk_path, .{}) catch |err| switch (err) { + error.FileNotFound => return error.PublicKeyFileNotFound, + else => return err, + }; + defer pk_file.close(); + const pk_data = try pk_file.readToEndAlloc(allocator, MAX_PK_SIZE); + defer allocator.free(pk_data); + + return xmss.KeyPair.fromSsz(allocator, sk_data, pk_data); +} /// Number of pre-generated test keys available in the test-keys submodule. 
const NUM_PREGENERATED_KEYS: usize = 32; @@ -210,20 +241,7 @@ fn loadPreGeneratedKey( var pk_path_buf: [512]u8 = undefined; const pk_path = std.fmt.bufPrint(&pk_path_buf, "{s}/validator_{d}_pk.ssz", .{ keys_dir, index }) catch unreachable; - // Read private key - var sk_file = try std.fs.cwd().openFile(sk_path, .{}); - defer sk_file.close(); - const sk_data = try sk_file.readToEndAlloc(allocator, MAX_SK_SIZE); - defer allocator.free(sk_data); - - // Read public key - var pk_file = try std.fs.cwd().openFile(pk_path, .{}); - defer pk_file.close(); - const pk_data = try pk_file.readToEndAlloc(allocator, MAX_PK_SIZE); - defer allocator.free(pk_data); - - // Reconstruct keypair from SSZ - return xmss.KeyPair.fromSsz(allocator, sk_data, pk_data); + return loadKeypairFromFiles(allocator, sk_path, pk_path); } pub fn getTestKeyManager( From 219001387825279ce5542850c237fa7dc0c15396 Mon Sep 17 00:00:00 2001 From: zclawz Date: Fri, 13 Mar 2026 12:11:55 +0530 Subject: [PATCH 07/24] fix: use while loop for reverse iteration (closes #664) (#665) Co-authored-by: zclawz --- pkgs/node/src/forkchoice.zig | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/pkgs/node/src/forkchoice.zig b/pkgs/node/src/forkchoice.zig index 51ff791f3..448db64ec 100644 --- a/pkgs/node/src/forkchoice.zig +++ b/pkgs/node/src/forkchoice.zig @@ -147,8 +147,10 @@ pub const ProtoArray = struct { } // iterate backwards apply deltas and propagating deltas to parents - for (0..self.nodes.items.len) |i| { - const node_idx = self.nodes.items.len - 1 - i; + var node_idx_a = self.nodes.items.len; + while (node_idx_a > 0) { + node_idx_a -= 1; + const node_idx = node_idx_a; const node_delta = deltas[node_idx]; self.nodes.items[node_idx].weight += node_delta; if (self.nodes.items[node_idx].parent) |parent_idx| { @@ -158,8 +160,10 @@ pub const ProtoArray = struct { // re-iterate backwards and calc best child and descendant // there seems to be no filter block tree in the mini3sf fc - for 
(0..self.nodes.items.len) |i| { - const node_idx = self.nodes.items.len - 1 - i; + var node_idx_b = self.nodes.items.len; + while (node_idx_b > 0) { + node_idx_b -= 1; + const node_idx = node_idx_b; const node = self.nodes.items[node_idx]; if (self.nodes.items[node_idx].parent) |parent_idx| { From e8d6ac641f0a3a8b5cd87d5f370469015ef951f7 Mon Sep 17 00:00:00 2001 From: zclawz Date: Fri, 13 Mar 2026 21:38:25 +0530 Subject: [PATCH 08/24] fix: skip STF re-processing for blocks already known to fork choice (#671) * fix: skip STF re-processing for blocks already known to fork choice (closes #669) * fix: also skip STF re-processing in processCachedDescendants if block already known to fork choice --------- Co-authored-by: zclawz --- pkgs/node/src/node.zig | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/pkgs/node/src/node.zig b/pkgs/node/src/node.zig index 1856bc55a..d18c16471 100644 --- a/pkgs/node/src/node.zig +++ b/pkgs/node/src/node.zig @@ -391,6 +391,17 @@ pub const BeamNode = struct { // Try to process each descendant for (descendants_to_process.items) |descendant_root| { if (self.network.getFetchedBlock(descendant_root)) |cached_block| { + // Skip if already known to fork choice — same guard as processBlockByRootChunk + if (self.chain.forkChoice.hasBlock(descendant_root)) { + self.logger.debug( + "cached block 0x{x} is already known to fork choice, skipping re-processing", + .{&descendant_root}, + ); + _ = self.network.removeFetchedBlock(descendant_root); + self.processCachedDescendants(descendant_root); + continue; + } + self.logger.debug( "Attempting to process cached block 0x{x}", .{&descendant_root}, @@ -602,6 +613,19 @@ pub const BeamNode = struct { }); } + // Skip STF re-processing if the block is already known to fork choice + // (e.g. 
the checkpoint sync anchor block — it is the trust root and does not + // need state-transition re-processing; re-processing it would cause an infinite + // fetch loop because onBlock would always see it as "already processed"). + if (self.chain.forkChoice.hasBlock(block_root)) { + self.logger.debug( + "block 0x{x} is already known to fork choice, skipping re-processing", + .{&block_root}, + ); + self.processCachedDescendants(block_root); + return; + } + // Try to add the block to the chain const missing_roots = self.chain.onBlock(signed_block.*, .{}) catch |err| { // Check if the error is due to missing parent From 5f5fe9ea7cd8e9ef041e7eb189512995f64cd099 Mon Sep 17 00:00:00 2001 From: zclawz Date: Sat, 14 Mar 2026 01:46:47 +0530 Subject: [PATCH 09/24] fix: replace {any} with {f} for types with format methods, fix GossipTopic format signature (closes #594) (#673) * fix: replace {any} with {f} for types with format methods, fix GossipTopic format signature (closes #594) * fix: replace all {any} with {f} for types with custom format methods (closes #594) --------- Co-authored-by: zclawz --- pkgs/cli/src/main.zig | 4 +- pkgs/network/src/ethlibp2p.zig | 4 +- pkgs/network/src/interface.zig | 12 +- pkgs/node/src/chain.zig | 6 +- pkgs/node/src/node.zig | 2 +- pkgs/spectest/src/json_expect.zig | 38 ++--- .../src/runner/fork_choice_runner.zig | 146 +++++++++--------- 7 files changed, 105 insertions(+), 107 deletions(-) diff --git a/pkgs/cli/src/main.zig b/pkgs/cli/src/main.zig index 8ffc93e12..7697c2b13 100644 --- a/pkgs/cli/src/main.zig +++ b/pkgs/cli/src/main.zig @@ -484,7 +484,7 @@ fn mainInner() !void { backend1 = network.getNetworkInterface(); backend2 = network.getNetworkInterface(); backend3 = network.getNetworkInterface(); - logger1_config.logger(null).debug("--- mock gossip {any}", .{backend1.gossip}); + logger1_config.logger(null).debug("--- mock gossip {f}", .{backend1.gossip}); } else { network1 = try allocator.create(networks.EthLibp2p); const key_pair1 = 
enr_lib.KeyPair.generate(); @@ -554,7 +554,7 @@ fn mainInner() !void { .attestation_committee_count = chain_config.spec.attestation_committee_count, }, logger3_config.logger(.network)); backend3 = network3.getNetworkInterface(); - logger1_config.logger(null).debug("--- ethlibp2p gossip {any}", .{backend1.gossip}); + logger1_config.logger(null).debug("--- ethlibp2p gossip {f}", .{backend1.gossip}); } var clock = try allocator.create(Clock); diff --git a/pkgs/network/src/ethlibp2p.zig b/pkgs/network/src/ethlibp2p.zig index ba39d710e..9e8343e79 100644 --- a/pkgs/network/src/ethlibp2p.zig +++ b/pkgs/network/src/ethlibp2p.zig @@ -385,7 +385,7 @@ export fn handleMsgFromRustBridge(zigHandler: *EthLibp2p, topic_str: [*:0]const }, .aggregation => |signed_aggregation| { zigHandler.logger.debug( - "network-{d}:: received gossip aggregation slot={d} (compressed={d}B, raw={d}B) from peer={s}{any}", + "network-{d}:: received gossip aggregation slot={d} (compressed={d}B, raw={d}B) from peer={s}{f}", .{ zigHandler.params.networkId, signed_aggregation.data.slot, @@ -400,7 +400,7 @@ export fn handleMsgFromRustBridge(zigHandler: *EthLibp2p, topic_str: [*:0]const // Debug-only JSON dump (conversion happens only if debug is actually emitted). 
zigHandler.logger.debug( - "network-{d}:: gossip payload json topic={s} from peer={s}{f}: {any}", + "network-{d}:: gossip payload json topic={s} from peer={s}{f}: {f}", .{ zigHandler.params.networkId, std.mem.span(topic_str), diff --git a/pkgs/network/src/interface.zig b/pkgs/network/src/interface.zig index 574fdfcbc..d4cb1a6be 100644 --- a/pkgs/network/src/interface.zig +++ b/pkgs/network/src/interface.zig @@ -222,9 +222,7 @@ pub const GossipTopic = struct { return GossipTopic{ .kind = kind }; } - pub fn format(self: GossipTopic, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { - _ = fmt; - _ = options; + pub fn format(self: GossipTopic, writer: anytype) !void { switch (self.kind) { .block, .aggregation => try writer.writeAll(@tagName(self.kind)), .attestation => { @@ -726,7 +724,7 @@ const MessagePublishWrapper = struct { const Self = @This(); pub fn format(self: Self, writer: anytype) !void { - try writer.print("MessagePublishWrapper{{ networkId={d}, topic={any}, sender={s} }}", .{ + try writer.print("MessagePublishWrapper{{ networkId={d}, topic={f}, sender={s} }}", .{ self.networkId, self.data.getGossipTopic(), self.sender_peer_id, @@ -886,11 +884,11 @@ pub const GenericGossipHandler = struct { const gossip_topic = data.getGossipTopic(); const handlerArr = self.onGossipHandlers.get(gossip_topic) orelse { const node_name = self.node_registry.getNodeNameFromPeerId(sender_peer_id); - self.logger.debug("network-{d}:: ongossip no handlers for topic={any} from peer={s}{any}", .{ self.networkId, gossip_topic, sender_peer_id, node_name }); + self.logger.debug("network-{d}:: ongossip no handlers for topic={f} from peer={s}{f}", .{ self.networkId, gossip_topic, sender_peer_id, node_name }); return; }; const node_name = self.node_registry.getNodeNameFromPeerId(sender_peer_id); - self.logger.debug("network-{d}:: ongossip handlers={d} topic={any} from peer={s}{f}", .{ self.networkId, handlerArr.items.len, gossip_topic, sender_peer_id, 
node_name }); + self.logger.debug("network-{d}:: ongossip handlers={d} topic={f} from peer={s}{f}", .{ self.networkId, handlerArr.items.len, gossip_topic, sender_peer_id, node_name }); for (handlerArr.items) |handler| { // TODO: figure out why scheduling on the loop is not working for libp2p separate net instance @@ -898,7 +896,7 @@ pub const GenericGossipHandler = struct { if (scheduleOnLoop) { const publishWrapper = try MessagePublishWrapper.init(self.allocator, handler, data, sender_peer_id, self.networkId, self.logger); - self.logger.debug("network-{d}:: scheduling ongossip publishWrapper={f} for topic={any}", .{ self.networkId, publishWrapper, gossip_topic }); + self.logger.debug("network-{d}:: scheduling ongossip publishWrapper={f} for topic={f}", .{ self.networkId, publishWrapper, gossip_topic }); // Create a separate completion object for each handler to avoid conflicts const completion = try self.allocator.create(xev.Completion); diff --git a/pkgs/node/src/chain.zig b/pkgs/node/src/chain.zig index 96249825b..0531a851b 100644 --- a/pkgs/node/src/chain.zig +++ b/pkgs/node/src/chain.zig @@ -463,7 +463,7 @@ pub const BeamChain = struct { const block_str = try block.toJsonString(self.allocator); defer self.allocator.free(block_str); - self.logger.debug("node-{d}::going for block production opts={any} raw block={s}", .{ self.nodeId, opts, block_str }); + self.logger.debug("node-{d}::going for block production opts={f} raw block={s}", .{ self.nodeId, opts, block_str }); // 2. 
apply STF to get post state & update post state root & cache it try stf.apply_raw_block(self.allocator, post_state, &block, self.block_building_logger, &self.root_to_slot_cache); @@ -471,7 +471,7 @@ pub const BeamChain = struct { const block_str_2 = try block.toJsonString(self.allocator); defer self.allocator.free(block_str_2); - self.logger.debug("applied raw block opts={any} raw block={s}", .{ opts, block_str_2 }); + self.logger.debug("applied raw block opts={f} raw block={s}", .{ opts, block_str_2 }); // 3. cache state to save recompute while adding the block on publish var block_root: [32]u8 = undefined; @@ -776,7 +776,7 @@ pub const BeamChain = struct { return .{}; }, .aggregation => |signed_aggregation| { - self.logger.debug("chain received gossip aggregation for slot={d} from peer={s}{any}", .{ + self.logger.debug("chain received gossip aggregation for slot={d} from peer={s}{f}", .{ signed_aggregation.data.slot, sender_peer_id, self.node_registry.getNodeNameFromPeerId(sender_peer_id), diff --git a/pkgs/node/src/node.zig b/pkgs/node/src/node.zig index d18c16471..b7fa52ace 100644 --- a/pkgs/node/src/node.zig +++ b/pkgs/node/src/node.zig @@ -197,7 +197,7 @@ pub const BeamNode = struct { }, .aggregation => |signed_aggregation| { const sender_node_name = self.node_registry.getNodeNameFromPeerId(sender_peer_id); - self.logger.info("received gossip aggregation for slot={d} from peer={s}{any}", .{ + self.logger.info("received gossip aggregation for slot={d} from peer={s}{f}", .{ signed_aggregation.data.slot, sender_peer_id, sender_node_name, diff --git a/pkgs/spectest/src/json_expect.zig b/pkgs/spectest/src/json_expect.zig index e9f69c5ea..921eaac39 100644 --- a/pkgs/spectest/src/json_expect.zig +++ b/pkgs/spectest/src/json_expect.zig @@ -45,7 +45,7 @@ pub fn expectObject( ) FixtureError!std.json.ObjectMap { const value = getField(obj, field_names) orelse { std.debug.print( - "fixture {s} case {s}{any}: missing field {s}\n", + "fixture {s} case {s}{f}: missing field 
{s}\n", .{ context.fixture_label, context.case_name, context.formatStep(), label }, ); return FixtureError.InvalidFixture; @@ -54,7 +54,7 @@ pub fn expectObject( .object => |map| map, else => { std.debug.print( - "fixture {s} case {s}{any}: field {s} must be object\n", + "fixture {s} case {s}{f}: field {s} must be object\n", .{ context.fixture_label, context.case_name, context.formatStep(), label }, ); return FixtureError.InvalidFixture; @@ -71,7 +71,7 @@ pub fn expectStringField( ) FixtureError![]const u8 { const value = getField(obj, field_names) orelse { std.debug.print( - "fixture {s} case {s}{any}: missing field {s}\n", + "fixture {s} case {s}{f}: missing field {s}\n", .{ context.fixture_label, context.case_name, context.formatStep(), label }, ); return FixtureError.InvalidFixture; @@ -88,7 +88,7 @@ pub fn expectU64Field( ) FixtureError!u64 { const value = getField(obj, field_names) orelse { std.debug.print( - "fixture {s} case {s}{any}: missing field {s}\n", + "fixture {s} case {s}{f}: missing field {s}\n", .{ context.fixture_label, context.case_name, context.formatStep(), label }, ); return FixtureError.InvalidFixture; @@ -106,7 +106,7 @@ pub fn expectBytesField( ) FixtureError!T { const value = getField(obj, field_names) orelse { std.debug.print( - "fixture {s} case {s}{any}: missing hex field {s}\n", + "fixture {s} case {s}{f}: missing hex field {s}\n", .{ context.fixture_label, context.case_name, context.formatStep(), label }, ); return FixtureError.InvalidFixture; @@ -124,7 +124,7 @@ pub fn expectStringValue( .string => |s| s, else => { std.debug.print( - "fixture {s} case {s}{any}: field {s} must be string\n", + "fixture {s} case {s}{f}: field {s} must be string\n", .{ context.fixture_label, context.case_name, context.formatStep(), label }, ); return FixtureError.InvalidFixture; @@ -141,21 +141,21 @@ pub fn expectU64Value( return switch (value) { .integer => |i| if (i >= 0) @as(u64, @intCast(i)) else blk: { std.debug.print( - "fixture {s} case {s}{any}: 
field {s} negative\n", + "fixture {s} case {s}{f}: field {s} negative\n", .{ context.fixture_label, context.case_name, context.formatStep(), label }, ); break :blk FixtureError.InvalidFixture; }, .float => { std.debug.print( - "fixture {s} case {s}{any}: field {s} must be integer\n", + "fixture {s} case {s}{f}: field {s} must be integer\n", .{ context.fixture_label, context.case_name, context.formatStep(), label }, ); return FixtureError.InvalidFixture; }, else => { std.debug.print( - "fixture {s} case {s}{any}: field {s} must be numeric\n", + "fixture {s} case {s}{f}: field {s} must be numeric\n", .{ context.fixture_label, context.case_name, context.formatStep(), label }, ); return FixtureError.InvalidFixture; @@ -180,7 +180,7 @@ pub fn expectBytesValue( const text = try expectStringValue(FixtureError, value, context, label); if (text.len < 2 or !std.mem.eql(u8, text[0..2], "0x")) { std.debug.print( - "fixture {s} case {s}{any}: field {s} missing 0x prefix\n", + "fixture {s} case {s}{f}: field {s} missing 0x prefix\n", .{ context.fixture_label, context.case_name, context.formatStep(), label }, ); return FixtureError.InvalidFixture; @@ -190,7 +190,7 @@ pub fn expectBytesValue( const expected_len = comptime (@typeInfo(T).array.len * 2); if (body.len != expected_len) { std.debug.print( - "fixture {s} case {s}{any}: field {s} wrong length\n", + "fixture {s} case {s}{f}: field {s} wrong length\n", .{ context.fixture_label, context.case_name, context.formatStep(), label }, ); return FixtureError.InvalidFixture; @@ -199,7 +199,7 @@ pub fn expectBytesValue( var out: T = undefined; _ = std.fmt.hexToBytes(&out, body) catch { std.debug.print( - "fixture {s} case {s}{any}: field {s} invalid hex\n", + "fixture {s} case {s}{f}: field {s} invalid hex\n", .{ context.fixture_label, context.case_name, context.formatStep(), label }, ); return FixtureError.InvalidFixture; @@ -217,7 +217,7 @@ pub fn expectObjectValue( .object => |map| map, else => { std.debug.print( - "fixture {s} 
case {s}{any}: field {s} must be object\n", + "fixture {s} case {s}{f}: field {s} must be object\n", .{ context.fixture_label, context.case_name, context.formatStep(), label }, ); return FixtureError.InvalidFixture; @@ -235,7 +235,7 @@ pub fn expectArrayValue( .array => |arr| arr, else => { std.debug.print( - "fixture {s} case {s}{any}: field {s} must be array\n", + "fixture {s} case {s}{f}: field {s} must be array\n", .{ context.fixture_label, context.case_name, context.formatStep(), label }, ); return FixtureError.InvalidFixture; @@ -257,7 +257,7 @@ pub fn appendBytesDataField( .array => |array| array, else => { std.debug.print( - "fixture {s} case {s}{any}: {s}.data must be array\n", + "fixture {s} case {s}{f}: {s}.data must be array\n", .{ context.fixture_label, context.case_name, context.formatStep(), label }, ); return FixtureError.InvalidFixture; @@ -268,7 +268,7 @@ pub fn appendBytesDataField( const value = try expectBytesValue(FixtureError, T, item, context, label); list.append(value) catch |err| { std.debug.print( - "fixture {s} case {s}{any}: {s} append failed: {s}\n", + "fixture {s} case {s}{f}: {s} append failed: {s}\n", .{ context.fixture_label, context.case_name, context.formatStep(), label, @errorName(err) }, ); return FixtureError.InvalidFixture; @@ -289,7 +289,7 @@ pub fn appendBoolDataField( .array => |array| array, else => { std.debug.print( - "fixture {s} case {s}{any}: {s}.data must be array\n", + "fixture {s} case {s}{f}: {s}.data must be array\n", .{ context.fixture_label, context.case_name, context.formatStep(), label }, ); return FixtureError.InvalidFixture; @@ -302,7 +302,7 @@ pub fn appendBoolDataField( .integer => |ival| ival != 0, else => { std.debug.print( - "fixture {s} case {s}{any}: {s} entries must be bool/int\n", + "fixture {s} case {s}{f}: {s} entries must be bool/int\n", .{ context.fixture_label, context.case_name, context.formatStep(), label }, ); return FixtureError.InvalidFixture; @@ -310,7 +310,7 @@ pub fn 
appendBoolDataField( }; list.append(flag) catch |err| { std.debug.print( - "fixture {s} case {s}{any}: {s} append failed: {s}\n", + "fixture {s} case {s}{f}: {s} append failed: {s}\n", .{ context.fixture_label, context.case_name, context.formatStep(), label, @errorName(err) }, ); return FixtureError.InvalidFixture; diff --git a/pkgs/spectest/src/runner/fork_choice_runner.zig b/pkgs/spectest/src/runner/fork_choice_runner.zig index f8e8f43d1..590910d5e 100644 --- a/pkgs/spectest/src/runner/fork_choice_runner.zig +++ b/pkgs/spectest/src/runner/fork_choice_runner.zig @@ -393,7 +393,7 @@ fn runStep( .object => |map| map, else => { std.debug.print( - "fixture {s} case {s}{any}: expected object\n", + "fixture {s} case {s}{f}: expected object\n", .{ json_ctx.fixture_label, json_ctx.case_name, json_ctx.formatStep() }, ); return FixtureError.InvalidFixture; @@ -404,7 +404,7 @@ fn runStep( .bool => |b| b, else => { std.debug.print( - "fixture {s} case {s}{any}: valid must be bool\n", + "fixture {s} case {s}{f}: valid must be bool\n", .{ json_ctx.fixture_label, json_ctx.case_name, json_ctx.formatStep() }, ); return FixtureError.InvalidFixture; @@ -422,13 +422,13 @@ fn runStep( break :blk processTickStep(ctx, json_ctx.fixture_label, json_ctx.case_name, step_index, step_obj); } else if (std.mem.eql(u8, step_type, "attestation")) { std.debug.print( - "fixture {s} case {s}{any}: attestation steps unsupported\n", + "fixture {s} case {s}{f}: attestation steps unsupported\n", .{ json_ctx.fixture_label, json_ctx.case_name, json_ctx.formatStep() }, ); return FixtureError.UnsupportedFixture; } else { std.debug.print( - "fixture {s} case {s}{any}: unknown stepType {s}\n", + "fixture {s} case {s}{f}: unknown stepType {s}\n", .{ json_ctx.fixture_label, json_ctx.case_name, json_ctx.formatStep(), step_type }, ); return FixtureError.InvalidFixture; @@ -438,7 +438,7 @@ fn runStep( result catch |err| { if (valid_flag) { std.debug.print( - "fixture {s} case {s}{any}: unexpected error {s}\n", + 
"fixture {s} case {s}{f}: unexpected error {s}\n", .{ json_ctx.fixture_label, json_ctx.case_name, json_ctx.formatStep(), @errorName(err) }, ); return FixtureError.FixtureMismatch; @@ -448,7 +448,7 @@ fn runStep( if (!valid_flag) { std.debug.print( - "fixture {s} case {s}{any}: expected failure but succeeded\n", + "fixture {s} case {s}{f}: expected failure but succeeded\n", .{ json_ctx.fixture_label, json_ctx.case_name, json_ctx.formatStep() }, ); return FixtureError.FixtureMismatch; @@ -459,7 +459,7 @@ fn runStep( .object => |map| map, else => { std.debug.print( - "fixture {s} case {s}{any}: checks must be object\n", + "fixture {s} case {s}{f}: checks must be object\n", .{ json_ctx.fixture_label, json_ctx.case_name, json_ctx.formatStep() }, ); return FixtureError.InvalidFixture; @@ -604,7 +604,7 @@ fn processBlockStep( ) !void { const block_wrapper = step_obj.get("block") orelse { std.debug.print( - "fixture {s} case {s}{any}: block step missing block field\n", + "fixture {s} case {s}{f}: block step missing block field\n", .{ fixture_path, case_name, formatStep(step_index) }, ); return FixtureError.InvalidFixture; @@ -632,7 +632,7 @@ fn processBlockStep( const aggregated_attestations = block.body.attestations.constSlice(); ctx.block_attestations.ensureTotalCapacity(ctx.allocator, aggregated_attestations.len) catch |err| { std.debug.print( - "fixture {s} case {s}{any}: failed to allocate block attestations ({s})\n", + "fixture {s} case {s}{f}: failed to allocate block attestations ({s})\n", .{ fixture_path, case_name, formatStep(step_index), @errorName(err) }, ); return FixtureError.InvalidFixture; @@ -640,7 +640,7 @@ fn processBlockStep( for (aggregated_attestations) |aggregated_attestation| { var indices = types.aggregationBitsToValidatorIndices(&aggregated_attestation.aggregation_bits, ctx.allocator) catch |err| { std.debug.print( - "fixture {s} case {s}{any}: failed to parse aggregation bits ({s})\n", + "fixture {s} case {s}{f}: failed to parse aggregation bits 
({s})\n", .{ fixture_path, case_name, formatStep(step_index), @errorName(err) }, ); return FixtureError.InvalidFixture; @@ -648,7 +648,7 @@ fn processBlockStep( defer indices.deinit(ctx.allocator); const participants = ctx.allocator.alloc(u64, indices.items.len) catch |err| { std.debug.print( - "fixture {s} case {s}{any}: failed to allocate participants ({s})\n", + "fixture {s} case {s}{f}: failed to allocate participants ({s})\n", .{ fixture_path, case_name, formatStep(step_index), @errorName(err) }, ); return FixtureError.InvalidFixture; @@ -665,7 +665,7 @@ fn processBlockStep( .target_slot = aggregated_attestation.data.target.slot, }) catch |err| { std.debug.print( - "fixture {s} case {s}{any}: failed to record block attestation ({s})\n", + "fixture {s} case {s}{f}: failed to record block attestation ({s})\n", .{ fixture_path, case_name, formatStep(step_index), @errorName(err) }, ); return FixtureError.InvalidFixture; @@ -676,7 +676,7 @@ fn processBlockStep( var block_root: types.Root = undefined; zeam_utils.hashTreeRoot(types.BeamBlock, block, &block_root, ctx.allocator) catch |err| { std.debug.print( - "fixture {s} case {s}{any}: hashing block failed ({s})\n", + "fixture {s} case {s}{f}: hashing block failed ({s})\n", .{ fixture_path, case_name, formatStep(step_index), @errorName(err) }, ); return FixtureError.InvalidFixture; @@ -684,7 +684,7 @@ fn processBlockStep( const parent_state_ptr = ctx.state_map.get(block.parent_root) orelse { std.debug.print( - "fixture {s} case {s}{any}: parent root 0x{x} unknown\n", + "fixture {s} case {s}{f}: parent root 0x{x} unknown\n", .{ fixture_path, case_name, formatStep(step_index), &block.parent_root }, ); return FixtureError.FixtureMismatch; @@ -702,7 +702,7 @@ fn processBlockStep( state_transition.apply_transition(ctx.allocator, new_state_ptr, block, .{ .logger = ctx.fork_logger, .validateResult = false }) catch |err| { std.debug.print( - "fixture {s} case {s}{any}: state transition failed {s}\n", + "fixture {s} case 
{s}{f}: state transition failed {s}\n", .{ fixture_path, case_name, formatStep(step_index), @errorName(err) }, ); return FixtureError.FixtureMismatch; @@ -715,7 +715,7 @@ fn processBlockStep( .confirmed = true, }) catch |err| { std.debug.print( - "fixture {s} case {s}{any}: forkchoice onBlock failed {s}\n", + "fixture {s} case {s}{f}: forkchoice onBlock failed {s}\n", .{ fixture_path, case_name, formatStep(step_index), @errorName(err) }, ); return FixtureError.FixtureMismatch; @@ -723,14 +723,14 @@ fn processBlockStep( ctx.state_map.put(ctx.allocator, block_root, new_state_ptr) catch |err| { std.debug.print( - "fixture {s} case {s}{any}: failed to index block state ({s})\n", + "fixture {s} case {s}{f}: failed to index block state ({s})\n", .{ fixture_path, case_name, formatStep(step_index), @errorName(err) }, ); return FixtureError.InvalidFixture; }; ctx.allocated_states.append(ctx.allocator, new_state_ptr) catch |err| { std.debug.print( - "fixture {s} case {s}{any}: failed to track state allocation ({s})\n", + "fixture {s} case {s}{f}: failed to track state allocation ({s})\n", .{ fixture_path, case_name, formatStep(step_index), @errorName(err) }, ); return FixtureError.InvalidFixture; @@ -740,7 +740,7 @@ fn processBlockStep( for (aggregated_attestations) |aggregated_attestation| { var indices = types.aggregationBitsToValidatorIndices(&aggregated_attestation.aggregation_bits, ctx.allocator) catch |err| { std.debug.print( - "fixture {s} case {s}{any}: failed to parse aggregation bits ({s})\n", + "fixture {s} case {s}{f}: failed to parse aggregation bits ({s})\n", .{ fixture_path, case_name, formatStep(step_index), @errorName(err) }, ); return FixtureError.InvalidFixture; @@ -749,7 +749,7 @@ fn processBlockStep( var proof_template = types.AggregatedSignatureProof.init(ctx.allocator) catch |err| { std.debug.print( - "fixture {s} case {s}{any}: failed to init proof template ({s})\n", + "fixture {s} case {s}{f}: failed to init proof template ({s})\n", .{ fixture_path, 
case_name, formatStep(step_index), @errorName(err) }, ); return FixtureError.InvalidFixture; @@ -761,7 +761,7 @@ fn processBlockStep( if (aggregated_attestation.aggregation_bits.get(i) catch false) { types.aggregationBitsSet(&proof_template.participants, i, true) catch |err| { std.debug.print( - "fixture {s} case {s}{any}: failed to set aggregation bit ({s})\n", + "fixture {s} case {s}{f}: failed to set aggregation bit ({s})\n", .{ fixture_path, case_name, formatStep(step_index), @errorName(err) }, ); return FixtureError.InvalidFixture; @@ -771,7 +771,7 @@ fn processBlockStep( var validator_ids = ctx.allocator.alloc(types.ValidatorIndex, indices.items.len) catch |err| { std.debug.print( - "fixture {s} case {s}{any}: failed to allocate validator ids ({s})\n", + "fixture {s} case {s}{f}: failed to allocate validator ids ({s})\n", .{ fixture_path, case_name, formatStep(step_index), @errorName(err) }, ); return FixtureError.InvalidFixture; @@ -783,7 +783,7 @@ fn processBlockStep( ctx.fork_choice.storeAggregatedPayload(validator_ids, &aggregated_attestation.data, proof_template, true) catch |err| { std.debug.print( - "fixture {s} case {s}{any}: failed to store aggregated payload ({s})\n", + "fixture {s} case {s}{f}: failed to store aggregated payload ({s})\n", .{ fixture_path, case_name, formatStep(step_index), @errorName(err) }, ); return FixtureError.FixtureMismatch; @@ -794,7 +794,7 @@ fn processBlockStep( var proposer_attestation = buildProposerAttestation(block, block_root, parent_state_ptr) catch |err| { std.debug.print( - "fixture {s} case {s}{any}: unable to build proposer attestation ({s})\n", + "fixture {s} case {s}{f}: unable to build proposer attestation ({s})\n", .{ fixture_path, case_name, formatStep(step_index), @errorName(err) }, ); return FixtureError.FixtureMismatch; @@ -824,7 +824,7 @@ fn processBlockStep( var proposer_proof = types.AggregatedSignatureProof.init(ctx.allocator) catch |err| { std.debug.print( - "fixture {s} case {s}{any}: failed to init 
proposer proof ({s})\n", + "fixture {s} case {s}{f}: failed to init proposer proof ({s})\n", .{ fixture_path, case_name, formatStep(step_index), @errorName(err) }, ); return FixtureError.InvalidFixture; @@ -833,7 +833,7 @@ fn processBlockStep( types.aggregationBitsSet(&proposer_proof.participants, proposer_attestation.validator_id, true) catch |err| { std.debug.print( - "fixture {s} case {s}{any}: failed to set proposer participant bit ({s})\n", + "fixture {s} case {s}{f}: failed to set proposer participant bit ({s})\n", .{ fixture_path, case_name, formatStep(step_index), @errorName(err) }, ); return FixtureError.InvalidFixture; @@ -858,7 +858,7 @@ fn processBlockStep( .string => |s| s, else => { std.debug.print( - "fixture {s} case {s}{any}: blockRootLabel must be string\n", + "fixture {s} case {s}{f}: blockRootLabel must be string\n", .{ fixture_path, case_name, formatStep(step_index) }, ); return FixtureError.InvalidFixture; @@ -866,7 +866,7 @@ fn processBlockStep( }; ctx.label_map.put(ctx.allocator, label, block_root) catch |err| { std.debug.print( - "fixture {s} case {s}{any}: failed to record blockRootLabel {s} ({s})\n", + "fixture {s} case {s}{f}: failed to record blockRootLabel {s} ({s})\n", .{ fixture_path, case_name, formatStep(step_index), label, @errorName(err) }, ); return FixtureError.InvalidFixture; @@ -887,7 +887,7 @@ fn processTickStep( const anchor_genesis_time = ctx.fork_choice.anchorState.config.genesis_time; if (time_value < anchor_genesis_time) { std.debug.print( - "fixture {s} case {s}{any}: tick time before genesis\n", + "fixture {s} case {s}{f}: tick time before genesis\n", .{ fixture_path, case_name, formatStep(step_index) }, ); return FixtureError.InvalidFixture; @@ -913,7 +913,7 @@ fn applyChecks( const expected = try expectU64Value(value, fixture_path, case_name, step_index, key); if (ctx.fork_choice.head.slot != expected) { std.debug.print( - "fixture {s} case {s}{any}: head slot mismatch got {d} expected {d}\n", + "fixture {s} case 
{s}{f}: head slot mismatch got {d} expected {d}\n", .{ fixture_path, case_name, formatStep(step_index), ctx.fork_choice.head.slot, expected }, ); return FixtureError.FixtureMismatch; @@ -925,7 +925,7 @@ fn applyChecks( const expected = try expectRootValue(value, fixture_path, case_name, step_index, key); if (!std.mem.eql(u8, &ctx.fork_choice.head.blockRoot, &expected)) { std.debug.print( - "fixture {s} case {s}{any}: head root mismatch\n", + "fixture {s} case {s}{f}: head root mismatch\n", .{ fixture_path, case_name, formatStep(step_index) }, ); return FixtureError.FixtureMismatch; @@ -938,7 +938,7 @@ fn applyChecks( .string => |s| s, else => { std.debug.print( - "fixture {s} case {s}{any}: headRootLabel must be string\n", + "fixture {s} case {s}{f}: headRootLabel must be string\n", .{ fixture_path, case_name, formatStep(step_index) }, ); return FixtureError.InvalidFixture; @@ -948,7 +948,7 @@ fn applyChecks( if (ctx.label_map.get(label)) |expected_root| { if (!std.mem.eql(u8, &head_root, &expected_root)) { std.debug.print( - "fixture {s} case {s}{any}: head root label {s} mismatch\n", + "fixture {s} case {s}{f}: head root label {s} mismatch\n", .{ fixture_path, case_name, formatStep(step_index), label }, ); return FixtureError.FixtureMismatch; @@ -956,7 +956,7 @@ fn applyChecks( } else { ctx.label_map.put(ctx.allocator, label, head_root) catch |err| { std.debug.print( - "fixture {s} case {s}{any}: failed to record label {s} ({s})\n", + "fixture {s} case {s}{f}: failed to record label {s} ({s})\n", .{ fixture_path, case_name, formatStep(step_index), label, @errorName(err) }, ); return FixtureError.InvalidFixture; @@ -969,7 +969,7 @@ fn applyChecks( const expected = try expectU64Value(value, fixture_path, case_name, step_index, key); if (ctx.fork_choice.fcStore.slot_clock.time.load(.monotonic) != expected) { std.debug.print( - "fixture {s} case {s}{any}: store time mismatch got {d} expected {d}\n", + "fixture {s} case {s}{f}: store time mismatch got {d} expected 
{d}\n", .{ fixture_path, case_name, formatStep(step_index), ctx.fork_choice.fcStore.slot_clock.time.load(.monotonic), expected }, ); return FixtureError.FixtureMismatch; @@ -982,7 +982,7 @@ fn applyChecks( const actual = ctx.fork_choice.fcStore.latest_justified.slot; if (actual != expected) { std.debug.print( - "fixture {s} case {s}{any}: latest justified slot mismatch got {d} expected {d}\n", + "fixture {s} case {s}{f}: latest justified slot mismatch got {d} expected {d}\n", .{ fixture_path, case_name, formatStep(step_index), actual, expected }, ); return FixtureError.FixtureMismatch; @@ -995,7 +995,7 @@ fn applyChecks( const actual = ctx.fork_choice.fcStore.latest_finalized.slot; if (actual != expected) { std.debug.print( - "fixture {s} case {s}{any}: latest finalized slot mismatch got {d} expected {d}\n", + "fixture {s} case {s}{f}: latest finalized slot mismatch got {d} expected {d}\n", .{ fixture_path, case_name, formatStep(step_index), actual, expected }, ); return FixtureError.FixtureMismatch; @@ -1007,14 +1007,14 @@ fn applyChecks( const expected = try expectU64Value(value, fixture_path, case_name, step_index, key); const checkpoint = ctx.fork_choice.getAttestationTarget() catch |err| { std.debug.print( - "fixture {s} case {s}{any}: attestation target failed {s}\n", + "fixture {s} case {s}{f}: attestation target failed {s}\n", .{ fixture_path, case_name, formatStep(step_index), @errorName(err) }, ); return FixtureError.FixtureMismatch; }; if (checkpoint.slot != expected) { std.debug.print( - "fixture {s} case {s}{any}: attestation target slot mismatch\n", + "fixture {s} case {s}{f}: attestation target slot mismatch\n", .{ fixture_path, case_name, formatStep(step_index) }, ); return FixtureError.FixtureMismatch; @@ -1032,7 +1032,7 @@ fn applyChecks( const actual: u64 = @intCast(ctx.block_attestations.items.len); if (actual != expected) { std.debug.print( - "fixture {s} case {s}{any}: block attestation count mismatch got {d} expected {d}\n", + "fixture {s} 
case {s}{f}: block attestation count mismatch got {d} expected {d}\n", .{ fixture_path, case_name, formatStep(step_index), actual, expected }, ); return FixtureError.FixtureMismatch; @@ -1051,7 +1051,7 @@ fn applyChecks( } std.debug.print( - "fixture {s} case {s}{any}: unsupported check {s}\n", + "fixture {s} case {s}{f}: unsupported check {s}\n", .{ fixture_path, case_name, formatStep(step_index), key }, ); return FixtureError.UnsupportedFixture; @@ -1069,7 +1069,7 @@ fn verifyBlockAttestations( .array => |entries| entries, else => { std.debug.print( - "fixture {s} case {s}{any}: blockAttestations must be array\n", + "fixture {s} case {s}{f}: blockAttestations must be array\n", .{ fixture_path, case_name, formatStep(step_index) }, ); return FixtureError.InvalidFixture; @@ -1078,7 +1078,7 @@ fn verifyBlockAttestations( if (ctx.block_attestations.items.len != arr.items.len) { std.debug.print( - "fixture {s} case {s}{any}: block attestation count mismatch got {d} expected {d}\n", + "fixture {s} case {s}{f}: block attestation count mismatch got {d} expected {d}\n", .{ fixture_path, case_name, formatStep(step_index), ctx.block_attestations.items.len, arr.items.len }, ); return FixtureError.FixtureMismatch; @@ -1086,7 +1086,7 @@ fn verifyBlockAttestations( const matched = ctx.allocator.alloc(bool, ctx.block_attestations.items.len) catch |err| { std.debug.print( - "fixture {s} case {s}{any}: failed to allocate match buffer ({s})\n", + "fixture {s} case {s}{f}: failed to allocate match buffer ({s})\n", .{ fixture_path, case_name, formatStep(step_index), @errorName(err) }, ); return FixtureError.InvalidFixture; @@ -1099,7 +1099,7 @@ fn verifyBlockAttestations( .object => |map| map, else => { std.debug.print( - "fixture {s} case {s}{any}: blockAttestations entry must be object\n", + "fixture {s} case {s}{f}: blockAttestations entry must be object\n", .{ fixture_path, case_name, formatStep(step_index) }, ); return FixtureError.InvalidFixture; @@ -1108,7 +1108,7 @@ fn 
verifyBlockAttestations( const participants_value = obj.get("participants") orelse { std.debug.print( - "fixture {s} case {s}{any}: blockAttestations missing participants\n", + "fixture {s} case {s}{f}: blockAttestations missing participants\n", .{ fixture_path, case_name, formatStep(step_index) }, ); return FixtureError.InvalidFixture; @@ -1117,7 +1117,7 @@ fn verifyBlockAttestations( .array => |entries| entries, else => { std.debug.print( - "fixture {s} case {s}{any}: blockAttestations participants must be array\n", + "fixture {s} case {s}{f}: blockAttestations participants must be array\n", .{ fixture_path, case_name, formatStep(step_index) }, ); return FixtureError.InvalidFixture; @@ -1125,7 +1125,7 @@ fn verifyBlockAttestations( }; const expected_participants = ctx.allocator.alloc(u64, participants_arr.items.len) catch |err| { std.debug.print( - "fixture {s} case {s}{any}: failed to allocate expected participants ({s})\n", + "fixture {s} case {s}{f}: failed to allocate expected participants ({s})\n", .{ fixture_path, case_name, formatStep(step_index), @errorName(err) }, ); return FixtureError.InvalidFixture; @@ -1162,7 +1162,7 @@ fn verifyBlockAttestations( if (!found) { std.debug.print( - "fixture {s} case {s}{any}: blockAttestations entry mismatch\n", + "fixture {s} case {s}{f}: blockAttestations entry mismatch\n", .{ fixture_path, case_name, formatStep(step_index) }, ); return FixtureError.FixtureMismatch; @@ -1181,7 +1181,7 @@ fn verifyAttestationChecks( .array => |array| array, else => { std.debug.print( - "fixture {s} case {s}{any}: attestationChecks must be array\n", + "fixture {s} case {s}{f}: attestationChecks must be array\n", .{ fixture_path, case_name, formatStep(step_index) }, ); return FixtureError.InvalidFixture; @@ -1193,7 +1193,7 @@ fn verifyAttestationChecks( .object => |map| map, else => { std.debug.print( - "fixture {s} case {s}{any}: attestationCheck entry must be object\n", + "fixture {s} case {s}{f}: attestationCheck entry must be 
object\n", .{ fixture_path, case_name, formatStep(step_index) }, ); return FixtureError.InvalidFixture; @@ -1205,7 +1205,7 @@ fn verifyAttestationChecks( const tracker = ctx.fork_choice.attestations.get(validator) orelse { std.debug.print( - "fixture {s} case {s}{any}: attestation tracker missing for validator {d}\n", + "fixture {s} case {s}{f}: attestation tracker missing for validator {d}\n", .{ fixture_path, case_name, formatStep(step_index), validator }, ); return FixtureError.FixtureMismatch; @@ -1220,7 +1220,7 @@ fn verifyAttestationChecks( if (proto == null) { std.debug.print( - "fixture {s} case {s}{any}: validator {d} missing {s} attestation\n", + "fixture {s} case {s}{f}: validator {d} missing {s} attestation\n", .{ fixture_path, case_name, formatStep(step_index), validator, location }, ); return FixtureError.FixtureMismatch; @@ -1228,7 +1228,7 @@ fn verifyAttestationChecks( const attestation_data = proto.?.attestation_data orelse { std.debug.print( - "fixture {s} case {s}{any}: validator {d} has no attestation payload\n", + "fixture {s} case {s}{f}: validator {d} has no attestation payload\n", .{ fixture_path, case_name, formatStep(step_index), validator }, ); return FixtureError.FixtureMismatch; @@ -1238,7 +1238,7 @@ fn verifyAttestationChecks( const expected = try expectU64Value(slot_value, fixture_path, case_name, step_index, "attestationSlot"); if (attestation_data.slot != expected) { std.debug.print( - "fixture {s} case {s}{any}: validator {d} attestation slot mismatch\n", + "fixture {s} case {s}{f}: validator {d} attestation slot mismatch\n", .{ fixture_path, case_name, formatStep(step_index), validator }, ); return FixtureError.FixtureMismatch; @@ -1249,7 +1249,7 @@ fn verifyAttestationChecks( const expected = try expectU64Value(slot_value, fixture_path, case_name, step_index, "headSlot"); if (attestation_data.head.slot != expected) { std.debug.print( - "fixture {s} case {s}{any}: validator {d} head slot mismatch\n", + "fixture {s} case {s}{f}: 
validator {d} head slot mismatch\n", .{ fixture_path, case_name, formatStep(step_index), validator }, ); return FixtureError.FixtureMismatch; @@ -1260,7 +1260,7 @@ fn verifyAttestationChecks( const expected = try expectU64Value(slot_value, fixture_path, case_name, step_index, "sourceSlot"); if (attestation_data.source.slot != expected) { std.debug.print( - "fixture {s} case {s}{any}: validator {d} source slot mismatch\n", + "fixture {s} case {s}{f}: validator {d} source slot mismatch\n", .{ fixture_path, case_name, formatStep(step_index), validator }, ); return FixtureError.FixtureMismatch; @@ -1271,7 +1271,7 @@ fn verifyAttestationChecks( const expected = try expectU64Value(slot_value, fixture_path, case_name, step_index, "targetSlot"); if (attestation_data.target.slot != expected) { std.debug.print( - "fixture {s} case {s}{any}: validator {d} target slot mismatch\n", + "fixture {s} case {s}{f}: validator {d} target slot mismatch\n", .{ fixture_path, case_name, formatStep(step_index), validator }, ); return FixtureError.FixtureMismatch; @@ -1291,7 +1291,7 @@ fn verifyLexicographicHead( .array => |entries| entries, else => { std.debug.print( - "fixture {s} case {s}{any}: lexicographicHeadAmong must be array\n", + "fixture {s} case {s}{f}: lexicographicHeadAmong must be array\n", .{ fixture_path, case_name, formatStep(step_index) }, ); return FixtureError.InvalidFixture; @@ -1300,7 +1300,7 @@ fn verifyLexicographicHead( if (arr.items.len == 0) { std.debug.print( - "fixture {s} case {s}{any}: lexicographicHeadAmong cannot be empty\n", + "fixture {s} case {s}{f}: lexicographicHeadAmong cannot be empty\n", .{ fixture_path, case_name, formatStep(step_index) }, ); return FixtureError.InvalidFixture; @@ -1314,7 +1314,7 @@ fn verifyLexicographicHead( .string => |s| s, else => { std.debug.print( - "fixture {s} case {s}{any}: lexicographicHeadAmong entries must be strings\n", + "fixture {s} case {s}{f}: lexicographicHeadAmong entries must be strings\n", .{ fixture_path, 
case_name, formatStep(step_index) }, ); return FixtureError.InvalidFixture; @@ -1323,7 +1323,7 @@ fn verifyLexicographicHead( const root = ctx.label_map.get(label) orelse { std.debug.print( - "fixture {s} case {s}{any}: lexicographicHeadAmong label {s} not found (missing prior headRootLabel?)\n", + "fixture {s} case {s}{f}: lexicographicHeadAmong label {s} not found (missing prior headRootLabel?)\n", .{ fixture_path, case_name, formatStep(step_index), label }, ); return FixtureError.InvalidFixture; @@ -1344,7 +1344,7 @@ fn verifyLexicographicHead( const head_root = ctx.fork_choice.head.blockRoot; if (!std.mem.eql(u8, &head_root, &expected_root)) { std.debug.print( - "fixture {s} case {s}{any}: head root mismatch for lexicographicHeadAmong (expected label {s})\n", + "fixture {s} case {s}{f}: head root mismatch for lexicographicHeadAmong (expected label {s})\n", .{ fixture_path, case_name, formatStep(step_index), best_label }, ); return FixtureError.FixtureMismatch; @@ -1380,7 +1380,7 @@ fn parseFixtureProposerAttestation( .object => |map| map, else => { std.debug.print( - "fixture {s} case {s}{any}: proposerAttestation must be object\n", + "fixture {s} case {s}{f}: proposerAttestation must be object\n", .{ fixture_path, case_name, formatStep(step_index) }, ); return FixtureError.InvalidFixture; @@ -1498,7 +1498,7 @@ fn parseAttestations( .array => |array| array, else => { std.debug.print( - "fixture {s} case {s}{any}: attestations.data must be array\n", + "fixture {s} case {s}{f}: attestations.data must be array\n", .{ fixture_path, case_name, formatStep(step_index) }, ); return FixtureError.InvalidFixture; @@ -1513,7 +1513,7 @@ fn parseAttestations( .object => |map| map, else => { std.debug.print( - "fixture {s} case {s}{any}: attestation #{} must be object\n", + "fixture {s} case {s}{f}: attestation #{} must be object\n", .{ fixture_path, case_name, formatStep(step_index), idx }, ); return FixtureError.InvalidFixture; @@ -1522,7 +1522,7 @@ fn parseAttestations( 
const bits_value = att_obj.get("aggregationBits") orelse { std.debug.print( - "fixture {s} case {s}{any}: attestation #{} missing aggregationBits\n", + "fixture {s} case {s}{f}: attestation #{} missing aggregationBits\n", .{ fixture_path, case_name, formatStep(step_index), idx }, ); return FixtureError.InvalidFixture; @@ -1531,7 +1531,7 @@ fn parseAttestations( .object => |map| map, else => { std.debug.print( - "fixture {s} case {s}{any}: attestation #{} aggregationBits must be object\n", + "fixture {s} case {s}{f}: attestation #{} aggregationBits must be object\n", .{ fixture_path, case_name, formatStep(step_index), idx }, ); return FixtureError.InvalidFixture; @@ -1539,7 +1539,7 @@ fn parseAttestations( }; const bits_data_value = bits_obj.get("data") orelse { std.debug.print( - "fixture {s} case {s}{any}: attestation #{} aggregationBits missing data\n", + "fixture {s} case {s}{f}: attestation #{} aggregationBits missing data\n", .{ fixture_path, case_name, formatStep(step_index), idx }, ); return FixtureError.InvalidFixture; @@ -1548,7 +1548,7 @@ fn parseAttestations( .array => |array| array, else => { std.debug.print( - "fixture {s} case {s}{any}: attestation #{} aggregationBits.data must be array\n", + "fixture {s} case {s}{f}: attestation #{} aggregationBits.data must be array\n", .{ fixture_path, case_name, formatStep(step_index), idx }, ); return FixtureError.InvalidFixture; @@ -1563,7 +1563,7 @@ fn parseAttestations( .bool => |b| b, else => { std.debug.print( - "fixture {s} case {s}{any}: attestation #{} aggregationBits element must be bool\n", + "fixture {s} case {s}{f}: attestation #{} aggregationBits element must be bool\n", .{ fixture_path, case_name, formatStep(step_index), idx }, ); return FixtureError.InvalidFixture; @@ -1615,7 +1615,7 @@ fn parseAttestations( aggregated_attestations.append(aggregated_attestation) catch |err| { std.debug.print( - "fixture {s} case {s}{any}: attestation #{} append failed: {s}\n", + "fixture {s} case {s}{f}: 
attestation #{} append failed: {s}\n", .{ fixture_path, case_name, formatStep(step_index), idx, @errorName(err) }, ); return FixtureError.InvalidFixture; @@ -1626,7 +1626,7 @@ fn parseAttestations( }, else => { std.debug.print( - "fixture {s} case {s}{any}: attestations must be object\n", + "fixture {s} case {s}{f}: attestations must be object\n", .{ fixture_path, case_name, formatStep(step_index) }, ); return FixtureError.InvalidFixture; From ac3083ca4f36eda95c6deb019b7c751627bb8626 Mon Sep 17 00:00:00 2001 From: zclawz Date: Mon, 16 Mar 2026 23:14:13 +0530 Subject: [PATCH 10/24] fix: properly free BeamState on error paths to prevent segfaults (closes #675) (#676) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix: replace {any} with {f} for types with format methods, fix GossipTopic format signature (closes #594) * fix: replace all {any} with {f} for types with custom format methods (closes #594) * fix: properly free BeamState on error paths to prevent segfaults (closes #675) Three memory-safety fixes in chain.zig: 1. onBlock — computedstate block: add errdefer for cpost_state - After allocator.create(): errdefer destroy (covers sszClone failure) - After sszClone(): errdefer deinit (covers verifySignatures / apply_transition failures, e.g. InvalidPostState). LIFO ordering ensures deinit runs before destroy, so interior ArrayList fields are freed cleanly. - Previously: a partially-mutated cloned state (historical_block_hashes already appended by process_slots) was silently leaked, causing UB / segfault. 2. onBlock — function level: add errdefer for post_state when we own it - If computedstate succeeds but a later step (forkChoice.onBlock, updateHead, InvalidSignatureGroups) returns an error, post_state was previously leaked. 3. BeamChain init — cloned_anchor_state: add errdefer before states.put - Same create+sszClone pattern without cleanup on states.put failure. 
--------- Co-authored-by: zclawz Co-authored-by: Anshal Shukla <53994948+anshalshukla@users.noreply.github.com> --- pkgs/node/src/chain.zig | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/pkgs/node/src/chain.zig b/pkgs/node/src/chain.zig index 0531a851b..17bfe23a3 100644 --- a/pkgs/node/src/chain.zig +++ b/pkgs/node/src/chain.zig @@ -139,7 +139,11 @@ pub const BeamChain = struct { var states = std.AutoHashMap(types.Root, *types.BeamState).init(allocator); const cloned_anchor_state = try allocator.create(types.BeamState); + // Destroy outer allocation if sszClone fails (interior not yet allocated). + errdefer allocator.destroy(cloned_anchor_state); try types.sszClone(allocator, types.BeamState, opts.anchorState.*, cloned_anchor_state); + // Interior fields are now allocated; deinit them if states.put fails (LIFO order). + errdefer cloned_anchor_state.deinit(); try states.put(fork_choice.head.blockRoot, cloned_anchor_state); var chain = Self{ @@ -833,11 +837,18 @@ pub const BeamChain = struct { break :computedroot cblock_root; }; + const post_state_owned = blockInfo.postState == null; const post_state = if (blockInfo.postState) |post_state_ptr| post_state_ptr else computedstate: { // 1. get parent state const pre_state = self.states.get(block.parent_root) orelse return BlockProcessingError.MissingPreState; const cpost_state = try self.allocator.create(types.BeamState); + // If sszClone or anything after fails, destroy the outer allocation. + errdefer self.allocator.destroy(cpost_state); + try types.sszClone(self.allocator, types.BeamState, pre_state.*, cpost_state); + // sszClone succeeded — interior heap fields are now allocated. + // If anything below fails, deinit interior first (LIFO: deinit runs before destroy above). + errdefer cpost_state.deinit(); // 2. 
verify XMSS signatures (independent step; placed before STF for now, parallelizable later) // Use public key cache to avoid repeated SSZ deserialization of validator public keys @@ -851,6 +862,12 @@ pub const BeamChain = struct { }); break :computedstate cpost_state; }; + // If post_state was freshly allocated above and a later step errors (e.g. forkChoice.onBlock, + // updateHead, or InvalidSignatureGroups), we must free it before returning the error. + errdefer if (post_state_owned) { + post_state.deinit(); + self.allocator.destroy(post_state); + }; // Add current block's root to cache AFTER STF (ensures cache stays in sync with historical_block_hashes) try self.root_to_slot_cache.put(block_root, block.slot); From cda4651a6d527a26528b0bd83b6f357e5f7b6482 Mon Sep 17 00:00:00 2001 From: zclawz Date: Wed, 18 Mar 2026 15:48:07 +0530 Subject: [PATCH 11/24] fix: wire attestation-committee-count CLI flag through to ChainOptions (#678) * fix: wire attestation-committee-count CLI flag through to ChainOptions NodeCommand was missing the @"attestation-committee-count" field, so simargs silently ignored the flag and the value always fell through to the hardcoded default of 1 in ChainConfig.init. With every node using committee_count=1, all validators compute validator_id % 1 = 0 and end up subscribed to the same single subnet. Changes: - Add @"attestation-committee-count": ?u64 = null to NodeCommand so simargs parses and exposes the flag. - Add attestation_committee_count: ?u64 = null to NodeOptions. - Add attestationCommitteeCountFromYAML() helper that reads ATTESTATION_COMMITTEE_COUNT from config.yaml as a fallback. - In buildStartOptions: CLI flag takes precedence; falls back to config.yaml value; leaves null if neither is present (ChainConfig still defaults to 1 in that case for backward compat). 
- In Node.init: apply options.attestation_committee_count to chain_options before constructing ChainConfig so the correct value reaches EthLibp2p.attestation_committee_count and the subnet assignment logic. - Add ATTESTATION_COMMITTEE_COUNT: 4 to test fixture config.yaml. - Add two unit tests for attestationCommitteeCountFromYAML. * fix: validate attestation-committee-count >= 1, default to 1 with warning * build: add minimum_zig_version = "0.15.0" to build.zig.zon --------- Co-authored-by: zclawz Co-authored-by: zclawz --- build.zig.zon | 1 + pkgs/cli/src/main.zig | 2 + pkgs/cli/src/node.zig | 73 ++++++++++++++++++++++++++++++ pkgs/cli/test/fixtures/config.yaml | 3 ++ 4 files changed, 79 insertions(+) diff --git a/build.zig.zon b/build.zig.zon index a7e6f3fd5..a3486763b 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -2,6 +2,7 @@ .name = .zeam, .fingerprint = 0x243fd12cc5f554a5, .version = "0.0.0", + .minimum_zig_version = "0.15.0", .dependencies = .{ .ssz = .{ .url = "git+https://github.com/blockblaz/ssz.zig#c5394395dd7d0f8eda685c4723ad25ebbf550570", diff --git a/pkgs/cli/src/main.zig b/pkgs/cli/src/main.zig index 7697c2b13..ed61b3a38 100644 --- a/pkgs/cli/src/main.zig +++ b/pkgs/cli/src/main.zig @@ -71,6 +71,7 @@ pub const NodeCommand = struct { @"data-dir": []const u8 = constants.DEFAULT_DATA_DIR, @"checkpoint-sync-url": ?[]const u8 = null, @"is-aggregator": bool = false, + @"attestation-committee-count": ?u64 = null, pub const __shorts__ = .{ .help = .h, @@ -91,6 +92,7 @@ pub const NodeCommand = struct { .@"data-dir" = "Path to the data directory", .@"checkpoint-sync-url" = "URL to fetch finalized checkpoint state from for checkpoint sync (e.g., http://localhost:5052/lean/v0/states/finalized)", .@"is-aggregator" = "Enable aggregator mode for committee signature aggregation", + .@"attestation-committee-count" = "Number of attestation committees (subnets); overrides config.yaml ATTESTATION_COMMITTEE_COUNT", .help = "Show help information for the node command", 
}; }; diff --git a/pkgs/cli/src/node.zig b/pkgs/cli/src/node.zig index a808a469a..1a98c1f92 100644 --- a/pkgs/cli/src/node.zig +++ b/pkgs/cli/src/node.zig @@ -87,6 +87,7 @@ pub const NodeOptions = struct { hash_sig_key_dir: []const u8, node_registry: *node_lib.NodeNameRegistry, checkpoint_sync_url: ?[]const u8 = null, + attestation_committee_count: ?u64 = null, pub fn deinit(self: *NodeOptions, allocator: std.mem.Allocator) void { for (self.bootnodes) |b| allocator.free(b); @@ -153,6 +154,12 @@ pub const Node = struct { // Set validator_pubkeys from genesis_spec (read from config.yaml via genesisConfigFromYAML) chain_options.validator_pubkeys = options.genesis_spec.validator_pubkeys; + // Apply attestation_committee_count if provided via CLI flag or config.yaml. + // ChainConfig.init falls back to 1 when this field is null, so we only override when set. + if (options.attestation_committee_count) |count| { + chain_options.attestation_committee_count = @intCast(count); + } + // transfer ownership of the chain_options to ChainConfig const chain_config = try ChainConfig.init(Chain.custom, chain_options); @@ -462,6 +469,19 @@ pub const Node = struct { } }; +/// Reads ATTESTATION_COMMITTEE_COUNT from a parsed config.yaml Yaml document. +/// Returns null if the field is absent or cannot be parsed. +fn attestationCommitteeCountFromYAML(config: Yaml) ?u64 { + if (config.docs.items.len == 0) return null; + const root = config.docs.items[0]; + if (root != .map) return null; + const value = root.map.get("ATTESTATION_COMMITTEE_COUNT") orelse return null; + return switch (value) { + .scalar => |s| std.fmt.parseInt(u64, s, 10) catch null, + else => null, + }; +} + /// Builds the start options for a node based on the provided command and options. /// It loads the necessary configuration files, parses them, and populates the /// `StartNodeOptions` structure. 
@@ -554,6 +574,26 @@ pub fn buildStartOptions( opts.hash_sig_key_dir = hash_sig_key_dir; opts.checkpoint_sync_url = node_cmd.@"checkpoint-sync-url"; opts.is_aggregator = node_cmd.@"is-aggregator"; + + // Resolve attestation_committee_count: CLI flag takes precedence over config.yaml. + if (node_cmd.@"attestation-committee-count") |count| { + opts.attestation_committee_count = count; + } else { + // Try to read ATTESTATION_COMMITTEE_COUNT from config.yaml + opts.attestation_committee_count = attestationCommitteeCountFromYAML(parsed_config); + } + + // Validate: attestation_committee_count must be >= 1. + // If the resolved value is 0 (an invalid input), log a warning and fall back to 1. + if (opts.attestation_committee_count) |count| { + if (count == 0) { + std.log.warn( + "attestation-committee-count must be >= 1 (got 0); defaulting to 1", + .{}, + ); + opts.attestation_committee_count = 1; + } + } } /// Downloads finalized checkpoint state from the given URL and deserializes it @@ -1382,3 +1422,36 @@ test "NodeOptions checkpoint_sync_url field is optional" { node_options.checkpoint_sync_url = "http://localhost:5052/lean/v0/states/finalized"; try std.testing.expect(node_options.checkpoint_sync_url != null); } + +test "attestationCommitteeCountFromYAML reads ATTESTATION_COMMITTEE_COUNT from config.yaml" { + var config_file = try utils_lib.loadFromYAMLFile(std.testing.allocator, "pkgs/cli/test/fixtures/config.yaml"); + defer config_file.deinit(std.testing.allocator); + + const count = attestationCommitteeCountFromYAML(config_file); + try std.testing.expect(count != null); + try std.testing.expectEqual(@as(u64, 4), count.?); +} + +test "attestationCommitteeCountFromYAML returns null when field is absent" { + // validator-config.yaml has no ATTESTATION_COMMITTEE_COUNT field + var validator_config = try utils_lib.loadFromYAMLFile(std.testing.allocator, "pkgs/cli/test/fixtures/validator-config.yaml"); + defer validator_config.deinit(std.testing.allocator); + + const 
count = attestationCommitteeCountFromYAML(validator_config); + try std.testing.expect(count == null); +} + +test "attestation_committee_count: zero value is clamped to 1 with a warning" { + // Simulate opts with count=0 — the validation block should reset it to 1. + var opts: NodeOptions = undefined; + opts.attestation_committee_count = 0; + + // Mirror the validation logic from buildStartOptions. + if (opts.attestation_committee_count) |count| { + if (count == 0) { + opts.attestation_committee_count = 1; + } + } + + try std.testing.expectEqual(@as(?u64, 1), opts.attestation_committee_count); +} diff --git a/pkgs/cli/test/fixtures/config.yaml b/pkgs/cli/test/fixtures/config.yaml index ed63ff2ad..82a717bb9 100644 --- a/pkgs/cli/test/fixtures/config.yaml +++ b/pkgs/cli/test/fixtures/config.yaml @@ -1,6 +1,9 @@ # Genesis Settings GENESIS_TIME: 1704085200 +# Chain Settings +ATTESTATION_COMMITTEE_COUNT: 4 + # Validator Settings VALIDATOR_COUNT: 9 From 71f6a671b9cc323fa40d3692371ab0d1c7970b82 Mon Sep 17 00:00:00 2001 From: zclawz Date: Thu, 19 Mar 2026 10:48:10 +0530 Subject: [PATCH 12/24] refactor: use idiomatic iterators and avoid manual index loops (#667) Co-authored-by: zclawz --- pkgs/cli/src/node.zig | 4 +--- pkgs/network/src/mock.zig | 5 ++--- pkgs/node/src/node.zig | 8 ++------ 3 files changed, 5 insertions(+), 12 deletions(-) diff --git a/pkgs/cli/src/node.zig b/pkgs/cli/src/node.zig index 1a98c1f92..a139be761 100644 --- a/pkgs/cli/src/node.zig +++ b/pkgs/cli/src/node.zig @@ -832,13 +832,11 @@ fn validatorAssignmentsFromYAML(allocator: std.mem.Allocator, node_key: []const //``` fn nodeKeyIndexFromYaml(node_key: []const u8, validator_config: Yaml) !usize { - var index: usize = 0; - for (validator_config.docs.items[0].map.get("validators").?.list) |entry| { + for (validator_config.docs.items[0].map.get("validators").?.list, 0..) 
|entry, index| { const name_value = entry.map.get("name").?; if (name_value == .scalar and std.mem.eql(u8, name_value.scalar, node_key)) { return index; } - index += 1; } return error.InvalidNodeKey; } diff --git a/pkgs/network/src/mock.zig b/pkgs/network/src/mock.zig index 4f1c57999..bf776d5fa 100644 --- a/pkgs/network/src/mock.zig +++ b/pkgs/network/src/mock.zig @@ -886,9 +886,8 @@ test "Mock status RPC between peers" { fn onPeerDisconnected(ptr: *anyopaque, peer_id: []const u8, _: interface.PeerDirection, _: interface.DisconnectionReason) !void { const self: *Self = @ptrCast(@alignCast(ptr)); - var idx: usize = 0; - while (idx < self.connections.items.len) : (idx += 1) { - if (std.mem.eql(u8, self.connections.items[idx], peer_id)) { + for (self.connections.items, 0..) |conn, idx| { + if (std.mem.eql(u8, conn, peer_id)) { const removed = self.connections.swapRemove(idx); self.allocator.free(removed); break; diff --git a/pkgs/node/src/node.zig b/pkgs/node/src/node.zig index b7fa52ace..80f36366e 100644 --- a/pkgs/node/src/node.zig +++ b/pkgs/node/src/node.zig @@ -1255,13 +1255,9 @@ pub const BeamNode = struct { } fn publishProducedAggregations(self: *Self, aggregations: []types.SignedAggregatedAttestation) !void { - var i: usize = 0; - while (i < aggregations.len) : (i += 1) { + for (aggregations, 0..) 
|_, i| { self.publishAggregation(aggregations[i]) catch |err| { - var j: usize = i; - while (j < aggregations.len) : (j += 1) { - aggregations[j].deinit(); - } + for (aggregations[i..]) |*a| a.deinit(); return err; }; aggregations[i].deinit(); From 979610b483ca7a86ab9533c5e70667ea19e0d9fc Mon Sep 17 00:00:00 2001 From: zclawz Date: Thu, 19 Mar 2026 10:51:16 +0530 Subject: [PATCH 13/24] perf: iterate HashMap entries to avoid unnecessary hash lookups (closes #663) (#666) Co-authored-by: zclawz Co-authored-by: Anshal Shukla <53994948+anshalshukla@users.noreply.github.com> --- pkgs/node/src/forkchoice.zig | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/pkgs/node/src/forkchoice.zig b/pkgs/node/src/forkchoice.zig index 448db64ec..9692377e2 100644 --- a/pkgs/node/src/forkchoice.zig +++ b/pkgs/node/src/forkchoice.zig @@ -928,9 +928,10 @@ pub const ForkChoice = struct { // TODO naive strategy to include all attestations that are consistent with the latest justified // replace by the other mini 3sf simple strategy to loop and see if justification happens and // till no further attestations can be added - for (0..self.config.genesis.numValidators()) |validator_id| { - const attestation_data = ((self.attestations.get(validator_id) orelse AttestationTracker{}) - .latestKnown orelse ProtoAttestation{}).attestation_data; + var att_iter = self.attestations.iterator(); + while (att_iter.next()) |entry| { + const validator_id = entry.key_ptr.*; + const attestation_data = (entry.value_ptr.latestKnown orelse ProtoAttestation{}).attestation_data; if (attestation_data) |att_data| { if (std.mem.eql(u8, &latest_justified.root, &att_data.source.root)) { @@ -988,23 +989,22 @@ pub const ForkChoice = struct { // balances are right now same for the dummy chain and each weighing 1 const validatorWeight = 1; - for (0..self.config.genesis.numValidators()) |validator_id| { - var attestation_tracker = self.attestations.get(validator_id) orelse 
AttestationTracker{}; - if (attestation_tracker.appliedIndex) |applied_index| { + var delta_iter = self.attestations.iterator(); + while (delta_iter.next()) |entry| { + if (entry.value_ptr.appliedIndex) |applied_index| { self.deltas.items[applied_index] -= validatorWeight; } - attestation_tracker.appliedIndex = null; + entry.value_ptr.appliedIndex = null; const latest_attestation = if (from_known) - attestation_tracker.latestKnown + entry.value_ptr.latestKnown else - attestation_tracker.latestNew; + entry.value_ptr.latestNew; if (latest_attestation) |delta_attestation| { self.deltas.items[delta_attestation.index] += validatorWeight; - attestation_tracker.appliedIndex = delta_attestation.index; + entry.value_ptr.appliedIndex = delta_attestation.index; } - try self.attestations.put(validator_id, attestation_tracker); } return self.deltas.items; From bd3d49f8c773872d2b8ff9f1baf81baef68763ba Mon Sep 17 00:00:00 2001 From: Parthasarathy Ramanujam <1627026+ch4r10t33r@users.noreply.github.com> Date: Thu, 19 Mar 2026 12:16:11 +0000 Subject: [PATCH 14/24] forkchoice: align /lean/v0/fork_choice response with leanSpec (#680) * forkchoice: align /lean/v0/fork_choice response with leanSpec - head: was {"slot", "root"}, now "0x..." flat root string - safe_target: was {"root": "0x..."}, now "0x..." 
flat root string - nodes: add proposer_index field (was missing) Achieved by: - Adding proposer_index to ProtoBlock and ProtoNode - Wiring proposer_index through all ProtoBlock/ProtoNode construction sites - Updating tree_visualizer.buildForkChoiceJSON serialization * forkchoice: add buildForkChoiceJSON test and clarifying comments --- pkgs/api/src/event_broadcaster.zig | 1 + pkgs/api/src/events.zig | 2 + pkgs/node/src/forkchoice.zig | 7 ++++ pkgs/node/src/tree_visualizer.zig | 67 +++++++++++++++++++++++++++--- pkgs/types/src/block.zig | 1 + 5 files changed, 73 insertions(+), 5 deletions(-) diff --git a/pkgs/api/src/event_broadcaster.zig b/pkgs/api/src/event_broadcaster.zig index 9131e6661..80d504f88 100644 --- a/pkgs/api/src/event_broadcaster.zig +++ b/pkgs/api/src/event_broadcaster.zig @@ -240,6 +240,7 @@ test "event broadcaster basic functionality" { // Test broadcasting an event (writes to the socket; read end fds[0] is left open so the write succeeds) const proto_block = types.ProtoBlock{ .slot = 123, + .proposer_index = 0, .blockRoot = [_]u8{1} ** 32, .parentRoot = [_]u8{2} ** 32, .stateRoot = [_]u8{3} ** 32, diff --git a/pkgs/api/src/events.zig b/pkgs/api/src/events.zig index f22890c0d..cfa4ab958 100644 --- a/pkgs/api/src/events.zig +++ b/pkgs/api/src/events.zig @@ -205,6 +205,7 @@ test "serialize new head event" { const proto_block = types.ProtoBlock{ .slot = 123, + .proposer_index = 0, .blockRoot = [_]u8{1} ** 32, .parentRoot = [_]u8{2} ** 32, .stateRoot = [_]u8{3} ** 32, @@ -271,6 +272,7 @@ fn makeSampleChainEvent(allocator: Allocator, tag: ChainEventType) !ChainEvent { .new_head => blk: { const proto_block = types.ProtoBlock{ .slot = 999_999, + .proposer_index = 0, .blockRoot = [_]u8{0xab} ** 32, .parentRoot = [_]u8{0xcd} ** 32, .stateRoot = [_]u8{0xef} ** 32, diff --git a/pkgs/node/src/forkchoice.zig b/pkgs/node/src/forkchoice.zig index 9692377e2..0fa6f14e7 100644 --- a/pkgs/node/src/forkchoice.zig +++ b/pkgs/node/src/forkchoice.zig @@ -23,6 +23,7 @@ 
const ProtoBlock = types.ProtoBlock; pub const ProtoNode = struct { // Fields from ProtoBlock slot: types.Slot, + proposer_index: types.ValidatorIndex, blockRoot: Root, parentRoot: Root, stateRoot: Root, @@ -109,6 +110,7 @@ pub const ProtoArray = struct { // }); const node = ProtoNode{ .slot = block.slot, + .proposer_index = block.proposer_index, .blockRoot = block.blockRoot, .parentRoot = block.parentRoot, .stateRoot = block.stateRoot, @@ -337,6 +339,7 @@ pub const ForkChoice = struct { const anchor_block = ProtoBlock{ .slot = opts.anchorState.slot, + .proposer_index = anchor_block_header.proposer_index, .blockRoot = anchor_block_root, .parentRoot = anchor_block_header.parent_root, .stateRoot = anchor_block_header.state_root, @@ -424,6 +427,7 @@ pub const ForkChoice = struct { // Fallback: create a ProtoNode from ProtoBlock if not found const head_node = ProtoNode{ .slot = self.head.slot, + .proposer_index = self.head.proposer_index, .blockRoot = self.head.blockRoot, .parentRoot = self.head.parentRoot, .stateRoot = self.head.stateRoot, @@ -1498,6 +1502,7 @@ pub const ForkChoice = struct { const proto_block = ProtoBlock{ .slot = slot, + .proposer_index = block.proposer_index, .blockRoot = block_root, .parentRoot = parent_root, .stateRoot = block.state_root, @@ -1529,6 +1534,7 @@ pub const ForkChoice = struct { // const block = utils.Cast(ProtoBlock, node); const block = ProtoBlock{ .slot = node.slot, + .proposer_index = node.proposer_index, .blockRoot = node.blockRoot, .parentRoot = node.parentRoot, .stateRoot = node.stateRoot, @@ -1915,6 +1921,7 @@ fn createTestRoot(fill_byte: u8) types.Root { fn createTestProtoBlock(slot: types.Slot, block_root_byte: u8, parent_root_byte: u8) ProtoBlock { return ProtoBlock{ .slot = slot, + .proposer_index = 0, .blockRoot = createTestRoot(block_root_byte), .parentRoot = createTestRoot(parent_root_byte), .stateRoot = createTestRoot(0x00), diff --git a/pkgs/node/src/tree_visualizer.zig b/pkgs/node/src/tree_visualizer.zig index 
ed769547b..52ba9d2c8 100644 --- a/pkgs/node/src/tree_visualizer.zig +++ b/pkgs/node/src/tree_visualizer.zig @@ -161,7 +161,15 @@ fn createTreeIndent(allocator: Allocator, depth: usize, is_last_child: bool) ![] } /// Build fork choice JSON for the /lean/v0/fork_choice API endpoint. -/// Matches the leanSpec format with head, justified, finalized, safe_target, and nodes. +/// +/// Field shapes follow leanSpec (src/lean_spec/subspecs/api/endpoints/fork_choice.py): +/// - head: bare root string "0x..." (NOT a {slot, root} object) +/// - safe_target: bare root string "0x..." (NOT a {root} object) +/// safe_target carries no slot because it is a root pointer, not a checkpoint; +/// the spec models it as Bytes32, distinct from the Checkpoint type used for +/// justified and finalized. +/// - justified / finalized: {slot, root} checkpoint objects +/// - nodes[]: {root, slot, parent_root, proposer_index, weight} pub fn buildForkChoiceJSON( snapshot: fcFactory.ForkChoice.Snapshot, output: *std.ArrayList(u8), @@ -170,9 +178,8 @@ pub fn buildForkChoiceJSON( const w = output.writer(allocator); try w.writeAll("{"); try w.print( - \\"head":{{"slot":{d},"root":"0x{x}"}},"justified":{{"slot":{d},"root":"0x{x}"}},"finalized":{{"slot":{d},"root":"0x{x}"}},"safe_target":{{"root":"0x{x}"}},"validator_count":{d},"nodes":[ + \\"head":"0x{x}","justified":{{"slot":{d},"root":"0x{x}"}},"finalized":{{"slot":{d},"root":"0x{x}"}},"safe_target":"0x{x}","validator_count":{d},"nodes":[ , .{ - snapshot.head.slot, &snapshot.head.blockRoot, snapshot.latest_justified.slot, &snapshot.latest_justified.root, @@ -185,8 +192,8 @@ pub fn buildForkChoiceJSON( for (snapshot.nodes, 0..) 
|node, i| { if (i > 0) try w.writeAll(","); try w.print( - \\{{"slot":{d},"root":"0x{x}","parent_root":"0x{x}","weight":{d}}} - , .{ node.slot, &node.blockRoot, &node.parentRoot, node.weight }); + \\{{"root":"0x{x}","slot":{d},"parent_root":"0x{x}","proposer_index":{d},"weight":{d}}} + , .{ &node.blockRoot, node.slot, &node.parentRoot, node.proposer_index, node.weight }); } try w.writeAll("]}"); } @@ -400,6 +407,7 @@ fn createTestProtoNode( ) fcFactory.ProtoNode { return fcFactory.ProtoNode{ .slot = slot, + .proposer_index = 0, .blockRoot = createTestRoot(block_root_byte), .parentRoot = createTestRoot(parent_root_byte), .stateRoot = createTestRoot(0x00), @@ -830,3 +838,52 @@ test "buildTreeVisualization: big tree with many branches and depth (max_depth=1 // Verify tree structure characters are used try std.testing.expect(std.mem.indexOf(u8, result, "├──") != null); } + +test "buildForkChoiceJSON: field shapes match leanSpec" { + const allocator = std.testing.allocator; + + // Build a minimal snapshot with one extra node besides the head. 
+ const head_node = createTestProtoNode(5, 0xAA, 0xBB, null, 0, 0, 0, 0, 0, null); + const child_node = createTestProtoNode(6, 0xCC, 0xAA, 0, 1, 0, 0, 0, 0, null); + + const nodes = try allocator.dupe(fcFactory.ProtoNode, &.{ head_node, child_node }); + + const snapshot = fcFactory.ForkChoice.Snapshot{ + .head = head_node, + .latest_justified = types.Checkpoint{ .slot = 4, .root = createTestRoot(0x11) }, + .latest_finalized = types.Checkpoint{ .slot = 2, .root = createTestRoot(0x22) }, + .safe_target_root = createTestRoot(0x33), + .validator_count = 128, + .nodes = nodes, + }; + defer snapshot.deinit(allocator); + + var output: std.ArrayList(u8) = .empty; + defer output.deinit(allocator); + + try buildForkChoiceJSON(snapshot, &output, allocator); + const json = output.items; + + // head is a flat root string, NOT an object + try std.testing.expect(std.mem.indexOf(u8, json, "\"head\":\"0x") != null); + try std.testing.expect(std.mem.indexOf(u8, json, "\"head\":{") == null); + + // safe_target is a flat root string, NOT an object + try std.testing.expect(std.mem.indexOf(u8, json, "\"safe_target\":\"0x") != null); + try std.testing.expect(std.mem.indexOf(u8, json, "\"safe_target\":{") == null); + + // justified and finalized remain checkpoint objects with slot + root + try std.testing.expect(std.mem.indexOf(u8, json, "\"justified\":{\"slot\":4,") != null); + try std.testing.expect(std.mem.indexOf(u8, json, "\"finalized\":{\"slot\":2,") != null); + + // validator_count is present + try std.testing.expect(std.mem.indexOf(u8, json, "\"validator_count\":128") != null); + + // each node has proposer_index + try std.testing.expect(std.mem.indexOf(u8, json, "\"proposer_index\":") != null); + + // node fields: root, slot, parent_root, proposer_index, weight + try std.testing.expect(std.mem.indexOf(u8, json, "\"root\":\"0x") != null); + try std.testing.expect(std.mem.indexOf(u8, json, "\"parent_root\":\"0x") != null); + try std.testing.expect(std.mem.indexOf(u8, json, 
"\"weight\":") != null); +} diff --git a/pkgs/types/src/block.zig b/pkgs/types/src/block.zig index fa4362bcd..a29da5b42 100644 --- a/pkgs/types/src/block.zig +++ b/pkgs/types/src/block.zig @@ -659,6 +659,7 @@ pub const BlockByRootRequest = struct { /// Canonical lightweight forkchoice proto block used across modules pub const ProtoBlock = struct { slot: Slot, + proposer_index: ValidatorIndex, blockRoot: Root, parentRoot: Root, stateRoot: Root, From eb601b3c24e88349ffc0bb2c3002ff0932e0fb61 Mon Sep 17 00:00:00 2001 From: zclawz Date: Fri, 20 Mar 2026 17:37:11 +0530 Subject: [PATCH 15/24] fix: wipe stale database on genesis time mismatch (#638) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix: improve state recovery logging and add genesis time validation (#481) - Add explicit success log when state is loaded from DB on restart - Validate genesis time of loaded state matches chain config; fall back to genesis on mismatch with a clear warning - Improve DB recovery logging: log block root and slot at each step so failures are easier to diagnose - Root cause: lean-quickstart spin-node.sh unconditionally wiped the data directory on every restart (fixed separately in lean-quickstart) * chore: update lean-quickstart submodule to fix data dir wipe on restart * fix: wipe stale database on genesis time mismatch When the DB contains state from a different genesis (genesis_time mismatch), close and delete the RocksDB directory before reopening so the node starts with a fresh DB instance rather than accumulating stale data. Requested by @g11tech in https://github.com/blockblaz/zeam/pull/637#discussion_r2899372652 * fix: add post-wipe genesis time re-check, log and return error if still mismatches * refactor: remove redundant post-wipe genesis check Per g11tech's review: the downstream loadLatestFinalizedState call will handle any inconsistency if the wipe somehow failed. No need to re-probe immediately after wiping. 
* fix: resolve CI failure - apply zig fmt to node.zig (remove trailing newline) * fix: error out if db wipe fails on genesis time mismatch * refactor the anchor setup on startup * fix: wipe db even when no local finalized state found Per @anshalshukla review: if loadLatestFinalizedState fails (no finalized state in db), we should still wipe the db for a clean slate rather than risk leftover data. NotFound errors are ignored since the db directory may not exist yet on first run. * build: switch production builds from ReleaseFast to ReleaseSafe ReleaseFast compiles unreachable statements and failed bounds/overflow checks to @trap() — a silent illegal instruction with no output. ReleaseSafe keeps safety checks active (bounds, overflow, unreachable → panics with a stack trace) while still applying most optimizations. This makes crash sites visible in production and CI instead of silently killing the process, which was the root cause of the hard-to-diagnose crashes fixed in PR #681. Affected: Dockerfile, ci.yml (SSE integration build), auto-release.yml (x86_64 and aarch64 release binaries). The ReleaseFast in build.zig is for the risc0/zkvm targets and is left unchanged. 
* fix: change ReleaseFast to ReleaseSafe in zkvm build step * fix: revert zkvm optimize to ReleaseFast (ReleaseSafe breaks riscv32 inline asm) --------- Co-authored-by: anshalshuklabot Co-authored-by: zclawz Co-authored-by: zeam-bot Co-authored-by: Anshal Shukla <53994948+anshalshukla@users.noreply.github.com> Co-authored-by: zclawz Co-authored-by: harkamal Co-authored-by: zclawz Co-authored-by: zclawz --- .github/workflows/auto-release.yml | 4 +- .github/workflows/ci.yml | 2 +- Dockerfile | 2 +- build.zig | 2 + lean-quickstart | 2 +- pkgs/cli/src/node.zig | 73 ++++++++++++++++++++++-------- pkgs/database/src/rocksdb.zig | 12 +++-- 7 files changed, 68 insertions(+), 29 deletions(-) diff --git a/.github/workflows/auto-release.yml b/.github/workflows/auto-release.yml index df2b17f2e..48064b6a8 100644 --- a/.github/workflows/auto-release.yml +++ b/.github/workflows/auto-release.yml @@ -184,9 +184,9 @@ jobs: - name: Build zeam natively run: | if [ "${{ matrix.arch }}" = "amd64" ]; then - zig build -Doptimize=ReleaseFast -Dtarget=x86_64-linux-gnu -Dcpu=baseline -Dgit_version="$(git rev-parse --short HEAD)" + zig build -Doptimize=ReleaseSafe -Dtarget=x86_64-linux-gnu -Dcpu=baseline -Dgit_version="$(git rev-parse --short HEAD)" else - zig build -Doptimize=ReleaseFast -Dtarget=aarch64-linux-gnu -Dcpu=baseline -Dgit_version="$(git rev-parse --short HEAD)" + zig build -Doptimize=ReleaseSafe -Dtarget=aarch64-linux-gnu -Dcpu=baseline -Dgit_version="$(git rev-parse --short HEAD)" fi - name: Build and push Docker image with pre-built binary (${{ matrix.arch }}) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 88aba60e7..a17468c95 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -448,7 +448,7 @@ jobs: run: | max_attempts=3 for attempt in $(seq 1 $max_attempts); do - if zig build -Doptimize=ReleaseFast -Dgit_version="$(git rev-parse --short HEAD)"; then + if zig build -Doptimize=ReleaseSafe -Dgit_version="$(git rev-parse --short HEAD)"; 
then echo "Successfully built on attempt $attempt" exit 0 fi diff --git a/Dockerfile b/Dockerfile index 4870999e9..04563645e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -93,7 +93,7 @@ RUN --mount=type=cache,target=/root/.cache/zig \ else \ GIT_VERSION=$(echo "$GIT_VERSION" | head -c 7); \ fi && \ - zig build -Doptimize=ReleaseFast -Dgit_version="$GIT_VERSION" + zig build -Doptimize=ReleaseSafe -Dgit_version="$GIT_VERSION" # Intermediate stage to prepare runtime libraries FROM ubuntu:24.04 AS runtime-prep diff --git a/build.zig b/build.zig index 588b0450e..86231ce37 100644 --- a/build.zig +++ b/build.zig @@ -733,6 +733,8 @@ fn build_zkvm_targets( build_options_module: *std.Build.Module, use_poseidon: bool, ) !void { + // zkvm targets (riscv32-freestanding-none) require ReleaseFast; ReleaseSafe + // triggers "invalid operand for inline asm constraint 'i'" in LLVM on riscv32. const optimize = .ReleaseFast; for (zkvm_targets) |zkvm_target| { diff --git a/lean-quickstart b/lean-quickstart index 043c30f30..9fd56dbba 160000 --- a/lean-quickstart +++ b/lean-quickstart @@ -1 +1 @@ -Subproject commit 043c30f302359c02c61ef35dd2fce91829ee92f0 +Subproject commit 9fd56dbba80901434a02e350ee1fa75a2c4d179f diff --git a/pkgs/cli/src/node.zig b/pkgs/cli/src/node.zig index a139be761..d12bbb8f3 100644 --- a/pkgs/cli/src/node.zig +++ b/pkgs/cli/src/node.zig @@ -191,39 +191,74 @@ pub const Node = struct { const anchorState: *types.BeamState = try allocator.create(types.BeamState); errdefer allocator.destroy(anchorState); self.anchor_state = anchorState; + errdefer self.anchor_state.deinit(); + + // load a valid local state available in db else genesis + var local_finalized_state: types.BeamState = undefined; + if (db.loadLatestFinalizedState(&local_finalized_state)) { + if (local_finalized_state.config.genesis_time != chain_config.genesis.genesis_time) { + self.logger.warn("database genesis time mismatch (db={d}, config={d}), wiping stale database", .{ + 
local_finalized_state.config.genesis_time, + chain_config.genesis.genesis_time, + }); + db.deinit(); + const rocksdb_path = try std.fmt.allocPrint(allocator, "{s}/rocksdb", .{options.database_path}); + defer allocator.free(rocksdb_path); + std.fs.deleteTreeAbsolute(rocksdb_path) catch |wipe_err| { + self.logger.err("failed to delete stale database directory '{s}': {any}", .{ rocksdb_path, wipe_err }); + return wipe_err; + }; + db = try database.Db.open(allocator, options.logger_config.logger(.database), options.database_path); + self.logger.info("stale database wiped, starting fresh & generating genesis", .{}); + + local_finalized_state.deinit(); + try self.anchor_state.genGenesisState(allocator, chain_config.genesis); + } else { + self.anchor_state.* = local_finalized_state; + } + } else |_| { + self.logger.info("no finalized state found in db, wiping database for a clean slate", .{}); + db.deinit(); + const rocksdb_path_fresh = try std.fmt.allocPrint(allocator, "{s}/rocksdb", .{options.database_path}); + defer allocator.free(rocksdb_path_fresh); + std.fs.deleteTreeAbsolute(rocksdb_path_fresh) catch |wipe_err| { + // Ignore NotFound — db may not exist yet, that is fine + if (wipe_err != error.FileNotFound) { + self.logger.err("failed to delete database directory '{s}': {any}", .{ rocksdb_path_fresh, wipe_err }); + return wipe_err; + } + }; + db = try database.Db.open(allocator, options.logger_config.logger(.database), options.database_path); + self.logger.info("starting fresh & generating genesis", .{}); + try self.anchor_state.genGenesisState(allocator, chain_config.genesis); + } - // Initialize anchor state with priority: checkpoint URL > database > genesis - var checkpoint_sync_succeeded = false; + // check if a valid and more recent checkpoint finalized state is available if (options.checkpoint_sync_url) |checkpoint_url| { self.logger.info("checkpoint sync enabled, downloading state from: {s}", .{checkpoint_url}); // Try checkpoint sync, fall back to 
database/genesis on failure - if (downloadCheckpointState(allocator, checkpoint_url, self.logger)) |downloaded_state| { - self.anchor_state.* = downloaded_state; - + if (downloadCheckpointState(allocator, checkpoint_url, self.logger)) |downloaded_state_const| { + var downloaded_state = downloaded_state_const; // Verify state against genesis config - if (verifyCheckpointState(allocator, self.anchor_state, &chain_config.genesis, self.logger)) { - self.logger.info("checkpoint sync completed successfully, using state at slot {d} as anchor", .{self.anchor_state.slot}); - checkpoint_sync_succeeded = true; + if (verifyCheckpointState(allocator, &downloaded_state, &chain_config.genesis, self.logger)) { + if (downloaded_state.slot > self.anchor_state.slot) { + self.logger.info("checkpoint sync completed successfully with a recent state at slot={d} as anchor", .{downloaded_state.slot}); + self.anchor_state.deinit(); + self.anchor_state.* = downloaded_state; + } else { + self.logger.warn("skipping checkpoint sync downloaded stale/same state at slot={d}, falling back to database", .{downloaded_state.slot}); + downloaded_state.deinit(); + } } else |verify_err| { self.logger.warn("checkpoint state verification failed: {}, falling back to database/genesis", .{verify_err}); - self.anchor_state.deinit(); + downloaded_state.deinit(); } } else |download_err| { self.logger.warn("checkpoint sync failed: {}, falling back to database/genesis", .{download_err}); } } - // Fall back to database/genesis if checkpoint sync was not attempted or failed - if (!checkpoint_sync_succeeded) { - // Try to load the latest finalized state from the database, fallback to genesis - db.loadLatestFinalizedState(self.anchor_state) catch |err| { - self.logger.warn("failed to load latest finalized state from database: {any}", .{err}); - try self.anchor_state.genGenesisState(allocator, chain_config.genesis); - }; - } - errdefer self.anchor_state.deinit(); - const num_validators: usize = 
@intCast(chain_config.genesis.numValidators()); self.key_manager = key_manager_lib.KeyManager.init(allocator); errdefer self.key_manager.deinit(); diff --git a/pkgs/database/src/rocksdb.zig b/pkgs/database/src/rocksdb.zig index 817d4b8b1..6058084e6 100644 --- a/pkgs/database/src/rocksdb.zig +++ b/pkgs/database/src/rocksdb.zig @@ -606,25 +606,27 @@ pub fn RocksDB(comptime column_namespaces: []const ColumnNamespace) type { ) !void { // Load the latest finalized slot from metadata const finalized_slot = self.loadLatestFinalizedSlot(database.DbDefaultNamespace) orelse { - self.logger.info("no finalized state found in database, will use genesis", .{}); + self.logger.info("no finalized slot metadata found in database, will use genesis", .{}); return error.NoFinalizedStateFound; }; - self.logger.info("found latest finalized slot {d}, loading block root...", .{finalized_slot}); + self.logger.info("found latest finalized slot {d} in database, loading block root...", .{finalized_slot}); // Load the block root for this finalized slot const block_root = self.loadFinalizedSlotIndex(database.DbFinalizedSlotsNamespace, finalized_slot) orelse { - self.logger.warn("finalized slot {d} found in metadata but not in finalized index", .{finalized_slot}); + self.logger.warn("finalized slot {d} found in metadata but block root not in finalized index — database may be corrupt", .{finalized_slot}); return error.FinalizedSlotNotFoundInIndex; }; + self.logger.info("found block root 0x{x} for finalized slot {d}, loading state...", .{ &block_root, finalized_slot }); + // Load the state from the database if (self.loadState(database.DbStatesNamespace, block_root)) |state| { state_ptr.* = state; - self.logger.info("successfully loaded finalized state at slot {d}", .{finalized_slot}); + self.logger.info("successfully recovered finalized state from database: slot={d}, block_root=0x{x}", .{ finalized_slot, &block_root }); return; } else { - self.logger.warn("finalized slot {d} found in index but 
state not in database", .{finalized_slot}); + self.logger.warn("finalized slot {d} block_root=0x{x} found in index but state not in database — state may have been pruned or database is corrupt", .{ finalized_slot, &block_root }); return error.FinalizedStateNotFoundInDatabase; } } From 49762c9078ceaa517f7fd823b0860870d62e908a Mon Sep 17 00:00:00 2001 From: Mercy Boma Naps Nkari <96525594+bomanaps@users.noreply.github.com> Date: Fri, 20 Mar 2026 13:08:23 +0100 Subject: [PATCH 16/24] devnet 3 metrics (#632) * devnet 3 metrics * move aggregated payloads metrics outside mutex scope * address review comment --- pkgs/cli/src/node.zig | 14 ++ pkgs/key-manager/src/lib.zig | 4 +- pkgs/metrics/src/lib.zig | 91 +++++++++-- pkgs/node/src/chain.zig | 2 +- pkgs/node/src/forkchoice.zig | 188 +++++++++++++---------- pkgs/state-transition/src/transition.zig | 12 +- 6 files changed, 215 insertions(+), 96 deletions(-) diff --git a/pkgs/cli/src/node.zig b/pkgs/cli/src/node.zig index d12bbb8f3..7a70d859c 100644 --- a/pkgs/cli/src/node.zig +++ b/pkgs/cli/src/node.zig @@ -308,6 +308,20 @@ pub const Node = struct { // Clean up metrics server if subsequent init operations fail errdefer if (self.metrics_server_handle) |handle| handle.stop(); + // Set validator status gauges on node start + zeam_metrics.metrics.lean_is_aggregator.set(if (options.is_aggregator) 1 else 0); + // Set committee count from config + const committee_count = chain_config.spec.attestation_committee_count; + zeam_metrics.metrics.lean_attestation_committee_count.set(committee_count); + // Set subnet for the first validator (if any) + if (validator_ids.len > 0) { + const first_validator_id: types.ValidatorIndex = @intCast(validator_ids[0]); + const subnet_id = types.computeSubnetId(first_validator_id, committee_count) catch 0; + zeam_metrics.metrics.lean_attestation_committee_subnet.set(subnet_id); + } else { + zeam_metrics.metrics.lean_attestation_committee_subnet.set(0); + } + // Start API server (pass chain pointer for 
chain-dependent endpoints) self.api_server_handle = try api_server.startAPIServer( allocator, diff --git a/pkgs/key-manager/src/lib.zig b/pkgs/key-manager/src/lib.zig index e1a961d21..ff4d28b1c 100644 --- a/pkgs/key-manager/src/lib.zig +++ b/pkgs/key-manager/src/lib.zig @@ -156,10 +156,12 @@ pub const KeyManager = struct { attestation: *const types.Attestation, allocator: Allocator, ) !xmss.Signature { + zeam_metrics.metrics.lean_pq_sig_attestation_signatures_total.incr(); + const validator_index: usize = @intCast(attestation.validator_id); const keypair = self.keys.get(validator_index) orelse return KeyManagerError.ValidatorKeyNotFound; - const signing_timer = zeam_metrics.lean_pq_signature_attestation_signing_time_seconds.start(); + const signing_timer = zeam_metrics.lean_pq_sig_attestation_signing_time_seconds.start(); var message: [32]u8 = undefined; try zeam_utils.hashTreeRoot(types.AttestationData, attestation.data, &message, allocator); diff --git a/pkgs/metrics/src/lib.zig b/pkgs/metrics/src/lib.zig index 1ed9d9687..122a743b2 100644 --- a/pkgs/metrics/src/lib.zig +++ b/pkgs/metrics/src/lib.zig @@ -44,12 +44,16 @@ const Metrics = struct { lean_attestations_valid_total: ForkChoiceAttestationsValidLabeledCounter, lean_attestations_invalid_total: ForkChoiceAttestationsInvalidLabeledCounter, lean_attestation_validation_time_seconds: ForkChoiceAttestationValidationTimeHistogram, - lean_pq_signature_attestation_signing_time_seconds: PQSignatureSigningHistogram, - lean_pq_signature_attestation_verification_time_seconds: PQSignatureVerificationHistogram, + // Individual attestation signature metrics (renamed to match spec) + lean_pq_sig_attestation_signing_time_seconds: PQSignatureSigningHistogram, + lean_pq_sig_attestation_verification_time_seconds: PQSignatureVerificationHistogram, + lean_pq_sig_attestation_signatures_total: PQSigAttestationSignaturesTotalCounter, + lean_pq_sig_attestation_signatures_valid_total: PQSigAttestationSignaturesValidCounter, + 
lean_pq_sig_attestation_signatures_invalid_total: PQSigAttestationSignaturesInvalidCounter, // Aggregated attestation signature metrics lean_pq_sig_aggregated_signatures_total: PQSigAggregatedSignaturesTotalCounter, lean_pq_sig_attestations_in_aggregated_signatures_total: PQSigAttestationsInAggregatedTotalCounter, - lean_pq_sig_attestation_signatures_building_time_seconds: PQSigBuildingTimeHistogram, + lean_pq_sig_aggregated_signatures_building_time_seconds: PQSigBuildingTimeHistogram, lean_pq_sig_aggregated_signatures_verification_time_seconds: PQSigAggregatedVerificationHistogram, lean_pq_sig_aggregated_signatures_valid_total: PQSigAggregatedValidCounter, lean_pq_sig_aggregated_signatures_invalid_total: PQSigAggregatedInvalidCounter, @@ -67,6 +71,16 @@ const Metrics = struct { lean_fork_choice_reorg_depth: LeanForkChoiceReorgDepthHistogram, // Finalization metrics lean_finalizations_total: LeanFinalizationsTotalCounter, + // Fork-choice store gauges + lean_gossip_signatures: LeanGossipSignaturesGauge, + lean_latest_new_aggregated_payloads: LeanLatestNewAggregatedPayloadsGauge, + lean_latest_known_aggregated_payloads: LeanLatestKnownAggregatedPayloadsGauge, + // Committee aggregation histogram + lean_committee_signatures_aggregation_time_seconds: CommitteeSignaturesAggregationHistogram, + // Validator status gauges + lean_is_aggregator: LeanIsAggregatorGauge, + lean_attestation_committee_subnet: LeanAttestationCommitteeSubnetGauge, + lean_attestation_committee_count: LeanAttestationCommitteeCountGauge, const ChainHistogram = metrics_lib.Histogram(f32, &[_]f32{ 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10 }); const BlockProcessingHistogram = metrics_lib.Histogram(f32, &[_]f32{ 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10 }); @@ -82,15 +96,19 @@ const Metrics = struct { const SlotsProcessedCounter = metrics_lib.Counter(u64); const AttestationsProcessedCounter = metrics_lib.Counter(u64); const LeanValidatorsCountGauge = metrics_lib.Gauge(u64); - 
const ForkChoiceBlockProcessingTimeHistogram = metrics_lib.Histogram(f32, &[_]f32{ 0.005, 0.01, 0.025, 0.05, 0.1, 1 }); + const ForkChoiceBlockProcessingTimeHistogram = metrics_lib.Histogram(f32, &[_]f32{ 0.005, 0.01, 0.025, 0.05, 0.1, 1, 1.25, 1.5, 2, 4 }); const ForkChoiceAttestationsValidLabeledCounter = metrics_lib.CounterVec(u64, struct { source: []const u8 }); const ForkChoiceAttestationsInvalidLabeledCounter = metrics_lib.CounterVec(u64, struct { source: []const u8 }); const ForkChoiceAttestationValidationTimeHistogram = metrics_lib.Histogram(f32, &[_]f32{ 0.005, 0.01, 0.025, 0.05, 0.1, 1 }); + // Individual attestation signature metric types + const PQSigAttestationSignaturesTotalCounter = metrics_lib.Counter(u64); + const PQSigAttestationSignaturesValidCounter = metrics_lib.Counter(u64); + const PQSigAttestationSignaturesInvalidCounter = metrics_lib.Counter(u64); // Aggregated attestation signature metric types const PQSigAggregatedSignaturesTotalCounter = metrics_lib.Counter(u64); const PQSigAttestationsInAggregatedTotalCounter = metrics_lib.Counter(u64); - const PQSigBuildingTimeHistogram = metrics_lib.Histogram(f32, &[_]f32{ 0.005, 0.01, 0.025, 0.05, 0.1, 1 }); - const PQSigAggregatedVerificationHistogram = metrics_lib.Histogram(f32, &[_]f32{ 0.005, 0.01, 0.025, 0.05, 0.1, 1 }); + const PQSigBuildingTimeHistogram = metrics_lib.Histogram(f32, &[_]f32{ 0.1, 0.25, 0.5, 0.75, 1, 1.25, 1.5, 2, 4 }); + const PQSigAggregatedVerificationHistogram = metrics_lib.Histogram(f32, &[_]f32{ 0.1, 0.25, 0.5, 0.75, 1, 1.25, 1.5, 2, 4 }); const PQSigAggregatedValidCounter = metrics_lib.Counter(u64); const PQSigAggregatedInvalidCounter = metrics_lib.Counter(u64); // Network peer metric types @@ -107,6 +125,16 @@ const Metrics = struct { const LeanForkChoiceReorgDepthHistogram = metrics_lib.Histogram(f32, &[_]f32{ 1, 2, 3, 5, 7, 10, 20, 30, 50, 100 }); // Finalization metric types const LeanFinalizationsTotalCounter = metrics_lib.CounterVec(u64, struct { result: []const u8 
}); + // Fork-choice store gauge types + const LeanGossipSignaturesGauge = metrics_lib.Gauge(u64); + const LeanLatestNewAggregatedPayloadsGauge = metrics_lib.Gauge(u64); + const LeanLatestKnownAggregatedPayloadsGauge = metrics_lib.Gauge(u64); + // Committee aggregation histogram type + const CommitteeSignaturesAggregationHistogram = metrics_lib.Histogram(f32, &[_]f32{ 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 0.75, 1 }); + // Validator status gauge types + const LeanIsAggregatorGauge = metrics_lib.Gauge(u64); + const LeanAttestationCommitteeSubnetGauge = metrics_lib.Gauge(u64); + const LeanAttestationCommitteeCountGauge = metrics_lib.Gauge(u64); }; /// Timer struct returned to the application. @@ -215,6 +243,12 @@ fn observePQSigAggregatedVerification(ctx: ?*anyopaque, value: f32) void { histogram.observe(value); } +fn observeCommitteeSignaturesAggregation(ctx: ?*anyopaque, value: f32) void { + const histogram_ptr = ctx orelse return; // No-op if not initialized + const histogram: *Metrics.CommitteeSignaturesAggregationHistogram = @ptrCast(@alignCast(histogram_ptr)); + histogram.observe(value); +} + /// The public variables the application interacts with. /// Calling `.start()` on these will start a new timer. 
pub var chain_onblock_duration_seconds: Histogram = .{ @@ -250,15 +284,15 @@ pub var lean_attestation_validation_time_seconds: Histogram = .{ .context = null, .observe = &observeFCAttestationValidationTimeHistogram, }; -pub var lean_pq_signature_attestation_signing_time_seconds: Histogram = .{ +pub var lean_pq_sig_attestation_signing_time_seconds: Histogram = .{ .context = null, .observe = &observePQSignatureAttestationSigning, }; -pub var lean_pq_signature_attestation_verification_time_seconds: Histogram = .{ +pub var lean_pq_sig_attestation_verification_time_seconds: Histogram = .{ .context = null, .observe = &observePQSignatureAttestationVerification, }; -pub var lean_pq_sig_attestation_signatures_building_time_seconds: Histogram = .{ +pub var lean_pq_sig_aggregated_signatures_building_time_seconds: Histogram = .{ .context = null, .observe = &observePQSigBuildingTime, }; @@ -266,6 +300,10 @@ pub var lean_pq_sig_aggregated_signatures_verification_time_seconds: Histogram = .context = null, .observe = &observePQSigAggregatedVerification, }; +pub var lean_committee_signatures_aggregation_time_seconds: Histogram = .{ + .context = null, + .observe = &observeCommitteeSignaturesAggregation, +}; /// Initializes the metrics system. Must be called once at startup. pub fn init(allocator: std.mem.Allocator) !void { @@ -295,12 +333,16 @@ pub fn init(allocator: std.mem.Allocator) !void { .lean_attestations_valid_total = try Metrics.ForkChoiceAttestationsValidLabeledCounter.init(allocator, "lean_attestations_valid_total", .{ .help = "Total number of valid attestations labeled by source (gossip or block)." }, .{}), .lean_attestations_invalid_total = try Metrics.ForkChoiceAttestationsInvalidLabeledCounter.init(allocator, "lean_attestations_invalid_total", .{ .help = "Total number of invalid attestations labeled by source (gossip or block)." 
}, .{}), .lean_attestation_validation_time_seconds = Metrics.ForkChoiceAttestationValidationTimeHistogram.init("lean_attestation_validation_time_seconds", .{ .help = "Time taken to validate attestation." }, .{}), - .lean_pq_signature_attestation_signing_time_seconds = Metrics.PQSignatureSigningHistogram.init("lean_pq_signature_attestation_signing_time_seconds", .{ .help = "Time taken to sign an attestation." }, .{}), - .lean_pq_signature_attestation_verification_time_seconds = Metrics.PQSignatureVerificationHistogram.init("lean_pq_signature_attestation_verification_time_seconds", .{ .help = "Time taken to verify an attestation signature." }, .{}), + // Individual attestation signature metrics (renamed to match spec) + .lean_pq_sig_attestation_signing_time_seconds = Metrics.PQSignatureSigningHistogram.init("lean_pq_sig_attestation_signing_time_seconds", .{ .help = "Time taken to sign an attestation." }, .{}), + .lean_pq_sig_attestation_verification_time_seconds = Metrics.PQSignatureVerificationHistogram.init("lean_pq_sig_attestation_verification_time_seconds", .{ .help = "Time taken to verify an attestation signature." }, .{}), + .lean_pq_sig_attestation_signatures_total = Metrics.PQSigAttestationSignaturesTotalCounter.init("lean_pq_sig_attestation_signatures_total", .{ .help = "Total number of individual attestation signatures." }, .{}), + .lean_pq_sig_attestation_signatures_valid_total = Metrics.PQSigAttestationSignaturesValidCounter.init("lean_pq_sig_attestation_signatures_valid_total", .{ .help = "Total number of valid individual attestation signatures." }, .{}), + .lean_pq_sig_attestation_signatures_invalid_total = Metrics.PQSigAttestationSignaturesInvalidCounter.init("lean_pq_sig_attestation_signatures_invalid_total", .{ .help = "Total number of invalid individual attestation signatures." 
}, .{}), // Aggregated attestation signature metrics .lean_pq_sig_aggregated_signatures_total = Metrics.PQSigAggregatedSignaturesTotalCounter.init("lean_pq_sig_aggregated_signatures_total", .{ .help = "Total number of aggregated signatures." }, .{}), .lean_pq_sig_attestations_in_aggregated_signatures_total = Metrics.PQSigAttestationsInAggregatedTotalCounter.init("lean_pq_sig_attestations_in_aggregated_signatures_total", .{ .help = "Total number of attestations included into aggregated signatures." }, .{}), - .lean_pq_sig_attestation_signatures_building_time_seconds = Metrics.PQSigBuildingTimeHistogram.init("lean_pq_sig_attestation_signatures_building_time_seconds", .{ .help = "Time taken to build aggregated attestation signatures." }, .{}), + .lean_pq_sig_aggregated_signatures_building_time_seconds = Metrics.PQSigBuildingTimeHistogram.init("lean_pq_sig_aggregated_signatures_building_time_seconds", .{ .help = "Time taken to build an aggregated attestation signature." }, .{}), .lean_pq_sig_aggregated_signatures_verification_time_seconds = Metrics.PQSigAggregatedVerificationHistogram.init("lean_pq_sig_aggregated_signatures_verification_time_seconds", .{ .help = "Time taken to verify an aggregated attestation signature." }, .{}), .lean_pq_sig_aggregated_signatures_valid_total = Metrics.PQSigAggregatedValidCounter.init("lean_pq_sig_aggregated_signatures_valid_total", .{ .help = "Total number of valid aggregated signatures." }, .{}), .lean_pq_sig_aggregated_signatures_invalid_total = Metrics.PQSigAggregatedInvalidCounter.init("lean_pq_sig_aggregated_signatures_invalid_total", .{ .help = "Total number of invalid aggregated signatures." }, .{}), @@ -318,10 +360,28 @@ pub fn init(allocator: std.mem.Allocator) !void { .lean_fork_choice_reorg_depth = Metrics.LeanForkChoiceReorgDepthHistogram.init("lean_fork_choice_reorg_depth", .{ .help = "Depth of fork choice reorgs in blocks." 
}, .{}), // Finalization metrics .lean_finalizations_total = try Metrics.LeanFinalizationsTotalCounter.init(allocator, "lean_finalizations_total", .{ .help = "Total finalization attempts by result." }, .{}), + // Fork-choice store gauges + .lean_gossip_signatures = Metrics.LeanGossipSignaturesGauge.init("lean_gossip_signatures", .{ .help = "Number of gossip signatures in fork-choice store." }, .{}), + .lean_latest_new_aggregated_payloads = Metrics.LeanLatestNewAggregatedPayloadsGauge.init("lean_latest_new_aggregated_payloads", .{ .help = "Number of new aggregated payload items." }, .{}), + .lean_latest_known_aggregated_payloads = Metrics.LeanLatestKnownAggregatedPayloadsGauge.init("lean_latest_known_aggregated_payloads", .{ .help = "Number of known aggregated payload items." }, .{}), + // Committee aggregation histogram + .lean_committee_signatures_aggregation_time_seconds = Metrics.CommitteeSignaturesAggregationHistogram.init("lean_committee_signatures_aggregation_time_seconds", .{ .help = "Time taken to aggregate committee signatures." }, .{}), + // Validator status gauges + .lean_is_aggregator = Metrics.LeanIsAggregatorGauge.init("lean_is_aggregator", .{ .help = "Validator's is_aggregator status. True=1, False=0." }, .{}), + .lean_attestation_committee_subnet = Metrics.LeanAttestationCommitteeSubnetGauge.init("lean_attestation_committee_subnet", .{ .help = "Node's attestation committee subnet." }, .{}), + .lean_attestation_committee_count = Metrics.LeanAttestationCommitteeCountGauge.init("lean_attestation_committee_count", .{ .help = "Number of attestation committees." 
}, .{}), }; // Initialize validators count to 0 by default (spec requires "On scrape" availability) metrics.lean_validators_count.set(0); + // Initialize committee-related gauges to 0 (placeholder until subnet logic is implemented) + metrics.lean_is_aggregator.set(0); + metrics.lean_attestation_committee_subnet.set(0); + metrics.lean_attestation_committee_count.set(0); + // Initialize fork-choice store gauges to 0 + metrics.lean_gossip_signatures.set(0); + metrics.lean_latest_new_aggregated_payloads.set(0); + metrics.lean_latest_known_aggregated_payloads.set(0); // Set context for histogram wrappers (observe functions already assigned at compile time) chain_onblock_duration_seconds.context = @ptrCast(&metrics.chain_onblock_duration_seconds); @@ -332,10 +392,11 @@ pub fn init(allocator: std.mem.Allocator) !void { lean_state_transition_attestations_processing_time_seconds.context = @ptrCast(&metrics.lean_state_transition_attestations_processing_time_seconds); lean_fork_choice_block_processing_time_seconds.context = @ptrCast(&metrics.lean_fork_choice_block_processing_time_seconds); lean_attestation_validation_time_seconds.context = @ptrCast(&metrics.lean_attestation_validation_time_seconds); - lean_pq_signature_attestation_signing_time_seconds.context = @ptrCast(&metrics.lean_pq_signature_attestation_signing_time_seconds); - lean_pq_signature_attestation_verification_time_seconds.context = @ptrCast(&metrics.lean_pq_signature_attestation_verification_time_seconds); - lean_pq_sig_attestation_signatures_building_time_seconds.context = @ptrCast(&metrics.lean_pq_sig_attestation_signatures_building_time_seconds); + lean_pq_sig_attestation_signing_time_seconds.context = @ptrCast(&metrics.lean_pq_sig_attestation_signing_time_seconds); + lean_pq_sig_attestation_verification_time_seconds.context = @ptrCast(&metrics.lean_pq_sig_attestation_verification_time_seconds); + lean_pq_sig_aggregated_signatures_building_time_seconds.context = 
@ptrCast(&metrics.lean_pq_sig_aggregated_signatures_building_time_seconds); lean_pq_sig_aggregated_signatures_verification_time_seconds.context = @ptrCast(&metrics.lean_pq_sig_aggregated_signatures_verification_time_seconds); + lean_committee_signatures_aggregation_time_seconds.context = @ptrCast(&metrics.lean_committee_signatures_aggregation_time_seconds); g_initialized = true; } diff --git a/pkgs/node/src/chain.zig b/pkgs/node/src/chain.zig index 17bfe23a3..a00942bfe 100644 --- a/pkgs/node/src/chain.zig +++ b/pkgs/node/src/chain.zig @@ -414,7 +414,7 @@ pub const BeamChain = struct { self.forkChoice.signatures_mutex.lock(); defer self.forkChoice.signatures_mutex.unlock(); - const building_timer = zeam_metrics.lean_pq_sig_attestation_signatures_building_time_seconds.start(); + const building_timer = zeam_metrics.lean_pq_sig_aggregated_signatures_building_time_seconds.start(); try aggregation.computeAggregatedSignatures( attestations, &pre_state.validators, diff --git a/pkgs/node/src/forkchoice.zig b/pkgs/node/src/forkchoice.zig index 0fa6f14e7..bbe2aff3c 100644 --- a/pkgs/node/src/forkchoice.zig +++ b/pkgs/node/src/forkchoice.zig @@ -864,6 +864,10 @@ pub const ForkChoice = struct { // Internal unlocked version - assumes caller holds lock fn acceptNewAttestationsUnlocked(self: *Self) !ProtoBlock { + // Capture counts outside lock scope for metrics update + var known_payloads_count: usize = 0; + var new_payloads_count: usize = 0; + var payloads_updated = false; { // Keep payload migration synchronized with other signature/payload map writers. 
self.signatures_mutex.lock(); @@ -891,8 +895,17 @@ pub const ForkChoice = struct { source_list.* = .empty; } self.latest_new_aggregated_payloads.clearAndFree(); + // Capture counts for metrics update outside lock + known_payloads_count = self.latest_known_aggregated_payloads.count(); + new_payloads_count = self.latest_new_aggregated_payloads.count(); + payloads_updated = true; } } + // Update fork-choice store gauges after promotion (outside lock scope) + if (payloads_updated) { + zeam_metrics.metrics.lean_latest_known_aggregated_payloads.set(@intCast(known_payloads_count)); + zeam_metrics.metrics.lean_latest_new_aggregated_payloads.set(@intCast(new_payloads_count)); + } // Promote latestNew → latestKnown in attestation tracker. // Attestations that were "new" (gossip) are now "known" (accepted). @@ -1142,6 +1155,7 @@ pub const ForkChoice = struct { // Store attestation data by root for later aggregation const data_root = try attestation_data.sszRoot(self.allocator); + var gossip_signatures_count: usize = 0; { self.signatures_mutex.lock(); defer self.signatures_mutex.unlock(); @@ -1156,7 +1170,10 @@ pub const ForkChoice = struct { .slot = attestation_slot, .signature = signed_attestation.signature, }); + gossip_signatures_count = self.gossip_signatures.count(); } + // Update metric outside lock scope + zeam_metrics.metrics.lean_gossip_signatures.set(@intCast(gossip_signatures_count)); const attestation = types.Attestation{ .validator_id = validator_id, @@ -1257,46 +1274,17 @@ pub const ForkChoice = struct { } fn aggregateCommitteeSignaturesUnlocked(self: *Self, state_opt: ?*const types.BeamState) ![]types.SignedAggregatedAttestation { + const aggregation_timer = zeam_metrics.lean_committee_signatures_aggregation_time_seconds.start(); + defer _ = aggregation_timer.observe(); + const state = state_opt orelse return try self.allocator.alloc(types.SignedAggregatedAttestation, 0); var attestations: std.ArrayList(types.Attestation) = .{}; defer 
attestations.deinit(self.allocator); - self.signatures_mutex.lock(); - defer self.signatures_mutex.unlock(); - - var sig_it = self.gossip_signatures.iterator(); - while (sig_it.next()) |entry| { - const sig_key = entry.key_ptr.*; - const attestation_data = self.attestation_data_by_root.get(sig_key.data_root) orelse continue; - try attestations.append(self.allocator, .{ - .validator_id = sig_key.validator_id, - .data = attestation_data, - }); - } - - var aggregation = try types.AggregatedAttestationsResult.init(self.allocator); - var agg_att_cleanup = true; - var agg_sig_cleanup = true; - errdefer if (agg_att_cleanup) { - for (aggregation.attestations.slice()) |*att| { - att.deinit(); - } - aggregation.attestations.deinit(); - }; - errdefer if (agg_sig_cleanup) { - for (aggregation.attestation_signatures.slice()) |*sig| { - sig.deinit(); - } - aggregation.attestation_signatures.deinit(); - }; - - try aggregation.computeAggregatedSignatures( - attestations.items, - &state.validators, - &self.gossip_signatures, - null, - ); + // Capture counts for metrics update outside lock scope + var new_payloads_count: usize = 0; + var gossip_sigs_count: usize = 0; var results: std.ArrayList(types.SignedAggregatedAttestation) = .{}; errdefer { @@ -1306,59 +1294,105 @@ pub const ForkChoice = struct { results.deinit(self.allocator); } - const agg_attestations = aggregation.attestations.constSlice(); - const agg_signatures = aggregation.attestation_signatures.constSlice(); + { + self.signatures_mutex.lock(); + defer self.signatures_mutex.unlock(); - for (agg_attestations, 0..) 
|agg_att, index| { - const proof = agg_signatures[index]; - const data_root = try agg_att.data.sszRoot(self.allocator); + var sig_it = self.gossip_signatures.iterator(); + while (sig_it.next()) |entry| { + const sig_key = entry.key_ptr.*; + const attestation_data = self.attestation_data_by_root.get(sig_key.data_root) orelse continue; + try attestations.append(self.allocator, .{ + .validator_id = sig_key.validator_id, + .data = attestation_data, + }); + } - try self.attestation_data_by_root.put(data_root, agg_att.data); + var aggregation = try types.AggregatedAttestationsResult.init(self.allocator); + var agg_att_cleanup = true; + var agg_sig_cleanup = true; + errdefer if (agg_att_cleanup) { + for (aggregation.attestations.slice()) |*att| { + att.deinit(); + } + aggregation.attestations.deinit(); + }; + errdefer if (agg_sig_cleanup) { + for (aggregation.attestation_signatures.slice()) |*sig| { + sig.deinit(); + } + aggregation.attestation_signatures.deinit(); + }; - var validator_indices = try types.aggregationBitsToValidatorIndices(&proof.participants, self.allocator); - defer validator_indices.deinit(self.allocator); + try aggregation.computeAggregatedSignatures( + attestations.items, + &state.validators, + &self.gossip_signatures, + null, + ); - for (validator_indices.items) |validator_index| { - const sig_key = SignatureKey{ - .validator_id = @intCast(validator_index), - .data_root = data_root, - }; - const gop = try self.latest_new_aggregated_payloads.getOrPut(sig_key); - if (!gop.found_existing) { - gop.value_ptr.* = .empty; + const agg_attestations = aggregation.attestations.constSlice(); + const agg_signatures = aggregation.attestation_signatures.constSlice(); + + for (agg_attestations, 0..) 
|agg_att, index| { + const proof = agg_signatures[index]; + const data_root = try agg_att.data.sszRoot(self.allocator); + + try self.attestation_data_by_root.put(data_root, agg_att.data); + + var validator_indices = try types.aggregationBitsToValidatorIndices(&proof.participants, self.allocator); + defer validator_indices.deinit(self.allocator); + + for (validator_indices.items) |validator_index| { + const sig_key = SignatureKey{ + .validator_id = @intCast(validator_index), + .data_root = data_root, + }; + const gop = try self.latest_new_aggregated_payloads.getOrPut(sig_key); + if (!gop.found_existing) { + gop.value_ptr.* = .empty; + } + + var cloned_proof: types.AggregatedSignatureProof = undefined; + try types.sszClone(self.allocator, types.AggregatedSignatureProof, proof, &cloned_proof); + errdefer cloned_proof.deinit(); + try gop.value_ptr.append(self.allocator, .{ + .slot = agg_att.data.slot, + .proof = cloned_proof, + }); + // Align with leanSpec: once this signature is represented by an aggregated + // payload, remove it from the gossip signature map to prevent re-aggregation. + _ = self.gossip_signatures.remove(sig_key); } - var cloned_proof: types.AggregatedSignatureProof = undefined; - try types.sszClone(self.allocator, types.AggregatedSignatureProof, proof, &cloned_proof); - errdefer cloned_proof.deinit(); - try gop.value_ptr.append(self.allocator, .{ - .slot = agg_att.data.slot, - .proof = cloned_proof, + var output_proof: types.AggregatedSignatureProof = undefined; + try types.sszClone(self.allocator, types.AggregatedSignatureProof, proof, &output_proof); + errdefer output_proof.deinit(); + try results.append(self.allocator, .{ + .data = agg_att.data, + .proof = output_proof, }); - // Align with leanSpec: once this signature is represented by an aggregated - // payload, remove it from the gossip signature map to prevent re-aggregation. 
- _ = self.gossip_signatures.remove(sig_key); } - var output_proof: types.AggregatedSignatureProof = undefined; - try types.sszClone(self.allocator, types.AggregatedSignatureProof, proof, &output_proof); - errdefer output_proof.deinit(); - try results.append(self.allocator, .{ - .data = agg_att.data, - .proof = output_proof, - }); - } + agg_att_cleanup = false; + agg_sig_cleanup = false; + for (aggregation.attestations.slice()) |*att| { + att.deinit(); + } + aggregation.attestations.deinit(); + for (aggregation.attestation_signatures.slice()) |*sig| { + sig.deinit(); + } + aggregation.attestation_signatures.deinit(); - agg_att_cleanup = false; - agg_sig_cleanup = false; - for (aggregation.attestations.slice()) |*att| { - att.deinit(); - } - aggregation.attestations.deinit(); - for (aggregation.attestation_signatures.slice()) |*sig| { - sig.deinit(); + // Capture counts before lock is released + new_payloads_count = self.latest_new_aggregated_payloads.count(); + gossip_sigs_count = self.gossip_signatures.count(); } - aggregation.attestation_signatures.deinit(); + + // Update fork-choice store gauges after aggregation (outside lock scope) + zeam_metrics.metrics.lean_latest_new_aggregated_payloads.set(@intCast(new_payloads_count)); + zeam_metrics.metrics.lean_gossip_signatures.set(@intCast(gossip_sigs_count)); return results.toOwnedSlice(self.allocator); } diff --git a/pkgs/state-transition/src/transition.zig b/pkgs/state-transition/src/transition.zig index 5fdd9694c..e3ab85f46 100644 --- a/pkgs/state-transition/src/transition.zig +++ b/pkgs/state-transition/src/transition.zig @@ -178,14 +178,22 @@ pub fn verifySingleAttestation( const validator = &validators[validatorIndex]; const pubkey = validator.getPubkey(); - const verification_timer = zeam_metrics.lean_pq_signature_attestation_verification_time_seconds.start(); + const verification_timer = zeam_metrics.lean_pq_sig_attestation_verification_time_seconds.start(); var message: [32]u8 = undefined; try 
zeam_utils.hashTreeRoot(types.AttestationData, attestation_data.*, &message, allocator); const epoch: u32 = @intCast(attestation_data.slot); - try xmss.verifySsz(pubkey, &message, epoch, signatureBytes); + // Increment total signatures counter for verification path (signatures received from wire) + zeam_metrics.metrics.lean_pq_sig_attestation_signatures_total.incr(); + + xmss.verifySsz(pubkey, &message, epoch, signatureBytes) catch |err| { + _ = verification_timer.observe(); + zeam_metrics.metrics.lean_pq_sig_attestation_signatures_invalid_total.incr(); + return err; + }; _ = verification_timer.observe(); + zeam_metrics.metrics.lean_pq_sig_attestation_signatures_valid_total.incr(); } // TODO(gballet) check if beam block needs to be a pointer From dcb7e75b5a767100c9d3a99ee10b539495e8d83e Mon Sep 17 00:00:00 2001 From: zclawz Date: Fri, 20 Mar 2026 19:04:43 +0530 Subject: [PATCH 17/24] refactor: extract wipeAndReopenDb helper, remove duplicated db-wipe logic (closes #682) (#684) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The db.deinit() + deleteTreeAbsolute + Db.open() pattern was duplicated in two branches of Node.init: 1. Genesis time mismatch (known stale db) — FileNotFound is an error 2. No finalized state found (fresh install) — FileNotFound is ignored Extracted into wipeAndReopenDb(db, allocator, database_path, logger_config, logger, ignore_not_found) which consolidates the two paths and makes the FileNotFound distinction explicit via the bool parameter. 
Co-authored-by: zclawz --- pkgs/cli/src/node.zig | 49 ++++++++++++++++++++++++++----------------- 1 file changed, 30 insertions(+), 19 deletions(-) diff --git a/pkgs/cli/src/node.zig b/pkgs/cli/src/node.zig index 7a70d859c..b24c078cf 100644 --- a/pkgs/cli/src/node.zig +++ b/pkgs/cli/src/node.zig @@ -130,6 +130,33 @@ pub const Node = struct { const Self = @This(); + /// Closes the current database, wipes the on-disk rocksdb directory, and + /// reopens a fresh database at the same path. + /// + /// If `ignore_not_found` is true, `error.FileNotFound` from the directory + /// deletion is silently swallowed (used for first-run installs where the + /// db directory has never been created). Set it to false when wiping a db + /// that is known to exist (genesis time mismatch case). + fn wipeAndReopenDb( + db: *database.Db, + allocator: std.mem.Allocator, + database_path: []const u8, + logger_config: *LoggerConfig, + logger: zeam_utils.ModuleLogger, + ignore_not_found: bool, + ) !void { + db.deinit(); + const rocksdb_path = try std.fmt.allocPrint(allocator, "{s}/rocksdb", .{database_path}); + defer allocator.free(rocksdb_path); + std.fs.deleteTreeAbsolute(rocksdb_path) catch |wipe_err| { + if (!ignore_not_found or wipe_err != error.FileNotFound) { + logger.err("failed to delete database directory '{s}': {any}", .{ rocksdb_path, wipe_err }); + return wipe_err; + } + }; + db.* = try database.Db.open(allocator, logger_config.logger(.database), database_path); + } + pub fn init( self: *Self, allocator: std.mem.Allocator, @@ -201,14 +228,7 @@ pub const Node = struct { local_finalized_state.config.genesis_time, chain_config.genesis.genesis_time, }); - db.deinit(); - const rocksdb_path = try std.fmt.allocPrint(allocator, "{s}/rocksdb", .{options.database_path}); - defer allocator.free(rocksdb_path); - std.fs.deleteTreeAbsolute(rocksdb_path) catch |wipe_err| { - self.logger.err("failed to delete stale database directory '{s}': {any}", .{ rocksdb_path, wipe_err }); - return 
wipe_err; - }; - db = try database.Db.open(allocator, options.logger_config.logger(.database), options.database_path); + try wipeAndReopenDb(&db, allocator, options.database_path, options.logger_config, self.logger, false); self.logger.info("stale database wiped, starting fresh & generating genesis", .{}); local_finalized_state.deinit(); @@ -218,17 +238,8 @@ pub const Node = struct { } } else |_| { self.logger.info("no finalized state found in db, wiping database for a clean slate", .{}); - db.deinit(); - const rocksdb_path_fresh = try std.fmt.allocPrint(allocator, "{s}/rocksdb", .{options.database_path}); - defer allocator.free(rocksdb_path_fresh); - std.fs.deleteTreeAbsolute(rocksdb_path_fresh) catch |wipe_err| { - // Ignore NotFound — db may not exist yet, that is fine - if (wipe_err != error.FileNotFound) { - self.logger.err("failed to delete database directory '{s}': {any}", .{ rocksdb_path_fresh, wipe_err }); - return wipe_err; - } - }; - db = try database.Db.open(allocator, options.logger_config.logger(.database), options.database_path); + // ignore_not_found=true: db dir may not exist yet on a fresh install + try wipeAndReopenDb(&db, allocator, options.database_path, options.logger_config, self.logger, true); self.logger.info("starting fresh & generating genesis", .{}); try self.anchor_state.genGenesisState(allocator, chain_config.genesis); } From 49824ea0ef090011990b4deb7fc45644c02334c7 Mon Sep 17 00:00:00 2001 From: zclawz Date: Fri, 20 Mar 2026 22:44:38 +0530 Subject: [PATCH 18/24] chore: bump lean-quickstart to a96b5142ea37f7ec3f5ee7046e6d311185491285 (#687) Co-authored-by: ZclawZ --- lean-quickstart | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lean-quickstart b/lean-quickstart index 9fd56dbba..a96b5142e 160000 --- a/lean-quickstart +++ b/lean-quickstart @@ -1 +1 @@ -Subproject commit 9fd56dbba80901434a02e350ee1fa75a2c4d179f +Subproject commit a96b5142ea37f7ec3f5ee7046e6d311185491285 From bfa16211ad49c50b6ecb8e4d244430dc26240555 Mon 
Sep 17 00:00:00 2001 From: Parthasarathy Ramanujam <1627026+ch4r10t33r@users.noreply.github.com> Date: Mon, 23 Mar 2026 11:22:44 +0000 Subject: [PATCH 19/24] clock, chain, node: fix silent crashes during cached-block catch-up sync (#681) Three bugs caused the node to crash silently (Zig ReleaseFast trap) when processing a burst of cached blocks while syncing from a checkpoint: 1. clock: `_ = r catch unreachable` triggered a trap on the expected `error.Canceled` that xev emits when a timer fires after being re-armed. The error is now handled explicitly: `Canceled` is silently ignored, any other value panics with a message. 2. chain: `processFinalizationAdvancement` assumed `finalized_roots` is non-empty before performing `finalized_roots[1..finalized_roots.len]` slice arithmetic. If the fork choice had already been rebased past the requested root `getCanonicalViewAndAnalysis` can return an empty slice, causing an out-of-bounds panic. The function now returns early with a warning instead. The `usize` subtraction for the orphan-count log line was also guarded against underflow. 3. node: `processCachedDescendants` did not call `chain.onBlockFollowup` after successfully integrating a cached block, unlike the sibling path `processBlockByRootChunk`. This meant head/justification/finalization events were not emitted and `last_emitted_finalized` was not advanced as the node processed a chain of cached blocks, leading to a large deferred finalization jump that triggered the panics above. 
--- pkgs/node/src/chain.zig | 22 ++++++++++++++++++++-- pkgs/node/src/clock.zig | 8 +++++++- pkgs/node/src/node.zig | 10 ++++++++++ 3 files changed, 37 insertions(+), 3 deletions(-) diff --git a/pkgs/node/src/chain.zig b/pkgs/node/src/chain.zig index a00942bfe..743881566 100644 --- a/pkgs/node/src/chain.zig +++ b/pkgs/node/src/chain.zig @@ -1172,14 +1172,32 @@ pub const BeamChain = struct { defer self.allocator.free(non_finalized_descendants); defer self.allocator.free(non_canonical_roots); + // getCanonicalViewAndAnalysis should always include the new finalized root itself. + // If it returns empty the fork choice has already been rebased past this root — bail + // out rather than performing an out-of-bounds slice on finalized_roots[1..]. + if (finalized_roots.len == 0) { + self.logger.warn("finalization advancement from slot={d} to slot={d} skipped: canonical analysis returned no roots (fork choice may have already been rebased past this checkpoint)", .{ + previousFinalized.slot, + latestFinalized.slot, + }); + return; + } + // finalized_ancestor_roots has the previous finalized included const newly_finalized_count = finalized_roots.len - 1; + const slot_gap = latestFinalized.slot - previousFinalized.slot; + const orphaned_count = if (slot_gap >= newly_finalized_count) slot_gap - newly_finalized_count else blk: { + self.logger.debug("finalization: newly_finalized_count={d} exceeds slot_gap={d}; orphaned count clamped to 0 (fork choice may contain more canonical roots than slot distance)", .{ + newly_finalized_count, + slot_gap, + }); + break :blk @as(u64, 0); + }; self.logger.info("finalization canonicality analysis (previousFinalized slot={d} to latestFinalized slot={d}): newly finalized={d}, orphaned/missing={d}, non finalized descendants={d} & finalized non canonical={d}", .{ previousFinalized.slot, - // latestFinalized.slot, newly_finalized_count, - latestFinalized.slot - previousFinalized.slot - newly_finalized_count, + orphaned_count, 
non_finalized_descendants.len, non_canonical_roots.len, }); diff --git a/pkgs/node/src/clock.zig b/pkgs/node/src/clock.zig index a924b5a21..33a2b2578 100644 --- a/pkgs/node/src/clock.zig +++ b/pkgs/node/src/clock.zig @@ -85,7 +85,13 @@ pub const Clock = struct { _: *xev.Completion, r: xev.Timer.RunError!void, ) xev.CallbackAction { - _ = r catch unreachable; + r catch |err| { + // Canceled is expected when tickInterval re-arms a still-pending + // completion (the old fire arrives with Canceled). Swallow it + // silently; the new timer is already scheduled. + if (err != error.Canceled) std.debug.panic("unexpected xev timer error: {}", .{err}); + return .disarm; + }; if (ud) |cb_wrapper| { _ = cb_wrapper.onInterval() catch void; } diff --git a/pkgs/node/src/node.zig b/pkgs/node/src/node.zig index 80f36366e..05329d44c 100644 --- a/pkgs/node/src/node.zig +++ b/pkgs/node/src/node.zig @@ -445,6 +445,16 @@ pub const BeamNode = struct { .{&descendant_root}, ); + // Run the same post-block followup that processBlockByRootChunk performs: + // emits head/justification/finalization events and advances finalization. + // Note: onBlockFollowup currently ignores the signedBlock pointer (_ = signedBlock), + // so the ordering relative to removeFetchedBlock is not a memory-safety requirement + // today — kept here as good practice for when the parameter is wired up. + // Note: pruneForkchoice=true means processFinalizationAdvancement may fire on every + // iteration of a deep cached-block chain. Correct semantically; a future optimisation + // could pass false during catch-up and prune once at the end. 
+ self.chain.onBlockFollowup(true, cached_block); + // Remove from cache now that it's been processed _ = self.network.removeFetchedBlock(descendant_root); From af0ee66d9d3ca941c6ead6257bd0bc7182756ff0 Mon Sep 17 00:00:00 2001 From: Parthasarathy Ramanujam <1627026+ch4r10t33r@users.noreply.github.com> Date: Mon, 23 Mar 2026 16:35:52 +0000 Subject: [PATCH 20/24] cli: fix checkpoint sync panic on chunked HTTP responses (#689) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When the SSZ state grows beyond ~3 MB the server switches from sending a Content-Length response to Transfer-Encoding: chunked. The previous body-reading loop called readSliceShort which internally goes through: readSliceShort → readVec → defaultReadVec → contentLengthStream contentLengthStream accesses reader.state.body_remaining_content_length but that field is not active for chunked responses (state is 'ready'), causing a panic: thread 1 panic: access of union field 'body_remaining_content_length' while field 'ready' is active Replace the manual request/response loop with client.fetch() using a std.Io.Writer.Allocating as the response_writer. fetch() calls response.readerDecompressing() + streamRemaining() which dispatches through chunkedStream or contentLengthStream correctly based on the actual transfer encoding used by the server. 
--- pkgs/cli/src/node.zig | 60 +++++++++++++++---------------------------- 1 file changed, 21 insertions(+), 39 deletions(-) diff --git a/pkgs/cli/src/node.zig b/pkgs/cli/src/node.zig index b24c078cf..5cbabf025 100644 --- a/pkgs/cli/src/node.zig +++ b/pkgs/cli/src/node.zig @@ -665,54 +665,36 @@ fn downloadCheckpointState( ) !types.BeamState { logger.info("downloading checkpoint state from: {s}", .{url}); - // Parse URL using std.Uri - const uri = std.Uri.parse(url) catch return error.InvalidUrl; - - // Initialize HTTP client var client = std.http.Client{ .allocator = allocator }; defer client.deinit(); - // Create HTTP request - var req = client.request(.GET, uri, .{}) catch |err| { - logger.err("failed to create HTTP request: {any}", .{err}); - return error.ConnectionFailed; - }; - defer req.deinit(); - - // Send the request (GET has no body) - req.sendBodiless() catch |err| { - logger.err("failed to send HTTP request: {any}", .{err}); + // Use an Allocating writer so client.fetch handles both Content-Length and + // Transfer-Encoding: chunked transparently. The previous manual readSliceShort + // loop panicked when the server switched to chunked encoding for responses + // larger than ~3 MB because readSliceShort → readVec → defaultReadVec → + // contentLengthStream panics when the body union field is 'ready' (chunked) + // rather than 'body_remaining_content_length'. 
+ var body_writer = std.Io.Writer.Allocating.init(allocator); + defer body_writer.deinit(); + + const result = client.fetch(.{ + .location = .{ .url = url }, + .method = .GET, + .response_writer = &body_writer.writer, + }) catch |err| { + logger.err("checkpoint sync request failed: {any}", .{err}); return error.RequestFailed; }; - // Receive response headers - var redirect_buffer: [1024]u8 = undefined; - var response = req.receiveHead(&redirect_buffer) catch |err| { - logger.err("failed to receive HTTP response: {any}", .{err}); - return error.ResponseFailed; - }; - - // Check HTTP status - if (response.head.status != .ok) { - logger.err("checkpoint sync failed: HTTP {d}", .{@intFromEnum(response.head.status)}); + if (result.status != .ok) { + logger.err("checkpoint sync failed: HTTP {d}", .{@intFromEnum(result.status)}); return error.HttpError; } - // Read response body - var ssz_data: std.ArrayList(u8) = .empty; - errdefer ssz_data.deinit(allocator); - - var transfer_buffer: [8192]u8 = undefined; - const body_reader = response.reader(&transfer_buffer); - var buffer: [8192]u8 = undefined; - while (true) { - const bytes_read = body_reader.readSliceShort(&buffer) catch |err| { - logger.err("failed to read response body: {any}", .{err}); - return error.ReadFailed; - }; - if (bytes_read == 0) break; - try ssz_data.appendSlice(allocator, buffer[0..bytes_read]); - } + // Transfer ownership out of the writer (writer buffer becomes empty so the + // deferred deinit above is safe to call). 
+ var ssz_data = body_writer.toArrayList(); + defer ssz_data.deinit(allocator); logger.info("downloaded checkpoint state: {d} bytes", .{ssz_data.items.len}); From aac823ed25d6a98e19c0189dbb2ca4212a2c490a Mon Sep 17 00:00:00 2001 From: zclawz Date: Wed, 25 Mar 2026 12:26:38 +0530 Subject: [PATCH 21/24] fix: subnet subscription for attestations (#685) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix: subnet subscription for attestations (closes #683) - Add --subnet-ids CLI flag (comma-separated) to explicitly configure which attestation subnets to subscribe and aggregate - When --subnet-ids is provided, subscribe only to those subnets at the libp2p gossip level (overrides auto-computation from validator ids) - Gate forkchoice import (onGossipAttestation) behind is_aggregator flag: gossip attestations are only imported if --is-aggregator is set - When --subnet-ids is also set, further filter imports to the specified subnets only; aggregator without --subnet-ids imports from all subscribed subnets - Proposer attestations from blocks continue to be imported unconditionally (they go through the block processing path, not onGossipAttestation) - Propagate subnet_ids through NodeOpts -> BeamNode -> ChainOpts -> BeamChain * fix: rename subnet-ids to aggregate-subnet-ids, subscribe to all subnets - Rename CLI flag --subnet-ids to --aggregate-subnet-ids per review - Subscribe to both explicit aggregate-subnet-ids AND validator-derived subnets (not exclusive else-if); deduplicate with a seen set - Fall back to subnet 0 only if no subnets from either source * fix: use ReleaseFast for risc0ospkg host tool to fix docker-build CI * fix: validate is-aggregator before aggregate-subnet-ids; scope no-filter import to validator subnet * fix: gate attestation p2p subscription on is_aggregator Non-aggregator nodes never import gossip attestations (chain.zig guards import behind is_aggregator_enabled). 
Subscribing to attestation subnets at the libp2p layer when the node will never consume them wastes network bandwidth — the messages are received, validated, and then immediately discarded. Move the is_aggregator guard up to the p2p subscription step so non-aggregators never join attestation subnet topics at all. Addresses review comment blockblaz/zeam#685 (chain.zig line 790). * fix: move subnet filtering to p2p subscription layer, not node import level Per @anshalshukla review: when no explicit --aggregate-subnet-ids filter is set, trust the p2p subscription (node.zig already subscribes only to validator-derived subnets at libp2p level). Removing the redundant computeSubnetId loop in chain.zig that was rejecting at the node level instead of preventing bandwidth use at the network layer. * fix: rename aggregate-subnet-ids to import-subnet-ids, subscribe/import regardless of aggregator flag - Rename --aggregate-subnet-ids CLI flag to --import-subnet-ids - Rename aggregation_subnet_ids field to import_subnet_ids in chain.zig - Update help text: option adds to (not overrides) automatic validator subnet computation - Subscribe to import_subnet_ids at p2p level regardless of is-aggregator flag (proposer nodes also need these attestations to include in blocks) - Import gossip attestations for import_subnet_ids regardless of is_aggregator_enabled - Validator-derived subnets and aggregator fallback still gated behind is_aggregator_enabled Addresses review feedback from @g11tech on PR #685 * correct the implementation * fix: remove stale subnet_ids from ChainOpts init, fix subscribe_subnet_ids typo - Remove .subnet_ids from ChainOpts initializer in node.zig (field removed by g11tech) - Fix typo: self.subscribe_subnet_ids -> self.subscription_subnet_ids in cli/node.zig - Add allocator.free for subscription_subnet_ids in Node.deinit * fix: resolve CI failure - pass empty subnet slice to net.run() in simtest call sites * undo changing network run * fix the subnet subscription 
* fix: implement subscription_subnet_ids computation from validator_ids and aggregation_subnet_ids * remove redundant compute of validator subnet ids * fix the edge condition * fix build * fix build --------- Co-authored-by: zclawz Co-authored-by: zclawz Co-authored-by: zclawz Co-authored-by: harkamal Co-authored-by: Anshal Shukla <53994948+anshalshukla@users.noreply.github.com> Co-authored-by: zclawz --- build.zig | 2 +- pkgs/cli/src/main.zig | 2 ++ pkgs/cli/src/node.zig | 26 ++++++++++++++++++++++++++ pkgs/node/src/chain.zig | 31 ++++++++++++++++++++----------- pkgs/node/src/node.zig | 35 +++++++++++++++++++++++++++++++---- 5 files changed, 80 insertions(+), 16 deletions(-) diff --git a/build.zig b/build.zig index 86231ce37..b1e71d1f4 100644 --- a/build.zig +++ b/build.zig @@ -852,7 +852,7 @@ fn build_zkvm_targets( .root_module = b.createModule(.{ .root_source_file = b.path("build/risc0.zig"), .target = host_target, - .optimize = .ReleaseSafe, + .optimize = .ReleaseFast, }), }); const run_risc0_postbuild_gen_step = b.addRunArtifact(risc0_postbuild_gen); diff --git a/pkgs/cli/src/main.zig b/pkgs/cli/src/main.zig index ed61b3a38..caf9478f4 100644 --- a/pkgs/cli/src/main.zig +++ b/pkgs/cli/src/main.zig @@ -72,6 +72,7 @@ pub const NodeCommand = struct { @"checkpoint-sync-url": ?[]const u8 = null, @"is-aggregator": bool = false, @"attestation-committee-count": ?u64 = null, + @"aggregate-subnet-ids": ?[]const u8 = null, pub const __shorts__ = .{ .help = .h, @@ -93,6 +94,7 @@ pub const NodeCommand = struct { .@"checkpoint-sync-url" = "URL to fetch finalized checkpoint state from for checkpoint sync (e.g., http://localhost:5052/lean/v0/states/finalized)", .@"is-aggregator" = "Enable aggregator mode for committee signature aggregation", .@"attestation-committee-count" = "Number of attestation committees (subnets); overrides config.yaml ATTESTATION_COMMITTEE_COUNT", + .@"aggregate-subnet-ids" = "Comma-separated list of subnet ids to additionally subscribe and aggregate 
gossip attestations (e.g. '0,1,2'); adds to automatic computation from validator ids", .help = "Show help information for the node command", }; }; diff --git a/pkgs/cli/src/node.zig b/pkgs/cli/src/node.zig index 5cbabf025..594ad253b 100644 --- a/pkgs/cli/src/node.zig +++ b/pkgs/cli/src/node.zig @@ -79,6 +79,8 @@ pub const NodeOptions = struct { genesis_spec: types.GenesisSpec, metrics_enable: bool, is_aggregator: bool, + /// If aggregator, additional subnet ids to import and aggregate + aggregation_subnet_ids: ?[]u32 = null, api_port: u16, metrics_port: u16, local_priv_key: []const u8, @@ -98,6 +100,7 @@ pub const NodeOptions = struct { allocator.free(self.validator_assignments); allocator.free(self.local_priv_key); allocator.free(self.hash_sig_key_dir); + if (self.aggregation_subnet_ids) |ids| allocator.free(ids); self.node_registry.deinit(); allocator.destroy(self.node_registry); } @@ -298,6 +301,7 @@ pub const Node = struct { .logger_config = options.logger_config, .node_registry = options.node_registry, .is_aggregator = options.is_aggregator, + .aggregation_subnet_ids = options.aggregation_subnet_ids, }); errdefer self.beam_node.deinit(); @@ -635,6 +639,28 @@ pub fn buildStartOptions( opts.checkpoint_sync_url = node_cmd.@"checkpoint-sync-url"; opts.is_aggregator = node_cmd.@"is-aggregator"; + // Parse --aggregate-subnet-ids (comma-separated list of subnet ids, e.g. "0,1,2") + // Require --is-aggregator to be set when --aggregate-subnet-ids is provided. 
+ if (node_cmd.@"aggregate-subnet-ids" != null and !node_cmd.@"is-aggregator") { + std.log.err("--aggregate-subnet-ids requires --is-aggregator to be set", .{}); + return error.AggregateSubnetIdsRequiresIsAggregator; + } + if (node_cmd.@"aggregate-subnet-ids") |subnet_ids_str| { + var list: std.ArrayList(u32) = .empty; + var it = std.mem.splitScalar(u8, subnet_ids_str, ','); + while (it.next()) |part| { + const trimmed = std.mem.trim(u8, part, " "); + if (trimmed.len == 0) continue; + const id = std.fmt.parseInt(u32, trimmed, 10) catch |err| { + std.log.warn("invalid subnet id '{s}': {any}", .{ trimmed, err }); + list.deinit(allocator); + return error.InvalidSubnetId; + }; + try list.append(allocator, id); + } + opts.aggregation_subnet_ids = try list.toOwnedSlice(allocator); + } + // Resolve attestation_committee_count: CLI flag takes precedence over config.yaml. if (node_cmd.@"attestation-committee-count") |count| { opts.attestation_committee_count = count; diff --git a/pkgs/node/src/chain.zig b/pkgs/node/src/chain.zig index 743881566..082a4fdf6 100644 --- a/pkgs/node/src/chain.zig +++ b/pkgs/node/src/chain.zig @@ -49,6 +49,7 @@ pub const ChainOpts = struct { db: database.Db, node_registry: *const NodeNameRegistry, force_block_production: bool = false, + // import and aggregate all subnet ids subscribed to is_aggregator: bool = false, }; @@ -765,17 +766,25 @@ pub const BeamChain = struct { } }; - // Process validated attestation - self.onGossipAttestation(signed_attestation) catch |err| { - zeam_metrics.metrics.lean_attestations_invalid_total.incr(.{ .source = "gossip" }) catch {}; - self.logger.err("attestation processing error: {any}", .{err}); - return err; - }; - self.logger.info("processed gossip attestation for slot={d} validator={d}{f}", .{ - slot, - validator_id, - validator_node_name, - }); + if (self.is_aggregator_enabled) { + // Process validated attestation + self.onGossipAttestation(signed_attestation) catch |err| { + 
zeam_metrics.metrics.lean_attestations_invalid_total.incr(.{ .source = "gossip" }) catch {}; + self.logger.err("attestation processing error: {any}", .{err}); + return err; + }; + self.logger.info("processed gossip attestation for slot={d} validator={d}{f}", .{ + slot, + validator_id, + validator_node_name, + }); + } else { + self.logger.debug("skipping gossip attestation import (not aggregator): subnet={d} slot={d} validator={d}", .{ + signed_attestation.subnet_id, + slot, + validator_id, + }); + } zeam_metrics.metrics.lean_attestations_valid_total.incr(.{ .source = "gossip" }) catch {}; return .{}; }, diff --git a/pkgs/node/src/node.zig b/pkgs/node/src/node.zig index 05329d44c..ec7d91a43 100644 --- a/pkgs/node/src/node.zig +++ b/pkgs/node/src/node.zig @@ -40,6 +40,8 @@ const NodeOpts = struct { logger_config: *zeam_utils.ZeamLoggerConfig, node_registry: *const NodeNameRegistry, is_aggregator: bool = false, + /// Explicit subnet ids to subscribe and import gossip attestations for aggregation + aggregation_subnet_ids: ?[]const u32 = null, }; pub const BeamNode = struct { @@ -52,6 +54,8 @@ pub const BeamNode = struct { last_interval: isize, logger: zeam_utils.ModuleLogger, node_registry: *const NodeNameRegistry, + /// Explicitly configured subnet ids for attestation import (adds to validator-derived subnets). + aggregation_subnet_ids: ?[]const u32 = null, const Self = @This(); @@ -108,6 +112,7 @@ pub const BeamNode = struct { .last_interval = -1, .logger = opts.logger_config.logger(.node), .node_registry = opts.node_registry, + .aggregation_subnet_ids = opts.aggregation_subnet_ids, }; chain.setPruneCachedBlocksCallback(self, pruneCachedBlocksCallback); @@ -1296,20 +1301,42 @@ pub const BeamNode = struct { const committee_count = self.chain.config.spec.attestation_committee_count; if (committee_count > 0) { + // Collect all subnets to subscribe into a deduplication set. 
+ var seen_subnets = std.AutoHashMap(u32, void).init(self.allocator); + defer seen_subnets.deinit(); + + // Always subscribe to explicitly specified import subnet ids for aggregation irrespective of + // validators + if (self.chain.is_aggregator_enabled) { + if (self.aggregation_subnet_ids) |explicit_subnets| { + for (explicit_subnets) |subnet_id| { + if (seen_subnets.contains(subnet_id)) continue; + try seen_subnets.put(subnet_id, {}); + try topics_list.append(self.allocator, .{ .kind = .attestation, .subnet_id = subnet_id }); + } + } + } + + // Additionally subscribe to these subnets for validators to create mesh network for attestations if (self.validator) |validator| { - var seen_subnets = std.AutoHashMap(u32, void).init(self.allocator); - defer seen_subnets.deinit(); for (validator.ids) |validator_id| { const subnet_id = try types.computeSubnetId(@intCast(validator_id), committee_count); if (seen_subnets.contains(@intCast(subnet_id))) continue; try seen_subnets.put(@intCast(subnet_id), {}); try topics_list.append(self.allocator, .{ .kind = .attestation, .subnet_id = @intCast(subnet_id) }); } - } else { - // Keep parity with leanSpec: passive nodes subscribe to subnet 0. + } + + // If no subnets were added yet (aggregator but no explicit ids and no + // validators registered), fall back to subnet 0 to keep parity with leanSpec. 
+ if (seen_subnets.count() == 0 and self.chain.is_aggregator_enabled) { try topics_list.append(self.allocator, .{ .kind = .attestation, .subnet_id = 0 }); } } + // if no committee count specified and still aggregator, all are in subnet 0 + else if (self.chain.is_aggregator_enabled) { + try topics_list.append(self.allocator, .{ .kind = .attestation, .subnet_id = 0 }); + } const topics_slice = try topics_list.toOwnedSlice(self.allocator); defer self.allocator.free(topics_slice); From c79b25d24370f8c715bb7ab7aacad129ddafe8bf Mon Sep 17 00:00:00 2001 From: Parthasarathy Ramanujam <1627026+ch4r10t33r@users.noreply.github.com> Date: Wed, 25 Mar 2026 08:29:00 +0000 Subject: [PATCH 22/24] fix: skip pre-finalized attestations instead of aborting block import (#692) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * cli: fix checkpoint sync panic on chunked HTTP responses When the SSZ state grows beyond ~3 MB the server switches from sending a Content-Length response to Transfer-Encoding: chunked. The previous body-reading loop called readSliceShort which internally goes through: readSliceShort → readVec → defaultReadVec → contentLengthStream contentLengthStream accesses reader.state.body_remaining_content_length but that field is not active for chunked responses (state is 'ready'), causing a panic: thread 1 panic: access of union field 'body_remaining_content_length' while field 'ready' is active Replace the manual request/response loop with client.fetch() using a std.Io.Writer.Allocating as the response_writer. fetch() calls response.readerDecompressing() + streamRemaining() which dispatches through chunkedStream or contentLengthStream correctly based on the actual transfer encoding used by the server. * types: skip pre-finalized attestations instead of aborting block import After checkpoint sync, incoming blocks may carry attestations whose target slots predate the finalized anchor. 
IsJustifiableSlot correctly identifies these as non-justifiable, but the `try` was propagating the error fatally, causing the entire block import to fail. This creates a cascading gap: block N fails → blocks N+1..M fail (missing parent) → no epoch-boundary attestations accumulate → justified checkpoint never advances → forkchoice stays stuck in `initing` indefinitely. Fix: catch InvalidJustifiableSlot and treat it as `false`. The attestation is then silently skipped via the existing !is_target_justifiable check, exactly as all other non-viable attestations (unknown source/target/head, stale slot, etc.) are handled. The block imports successfully, the chain catches up, and the node exits the initing state. Update the test that was asserting the old (buggy) error-propagation behaviour to instead assert that process_attestations succeeds. * node: fix fc_initing deadlock after checkpoint sync After checkpoint sync the forkchoice starts in the initing state and waits for a first justified checkpoint before declaring itself ready. The status-response sync handler was checking getSyncStatus() and treating fc_initing the same as synced — doing nothing. This created a deadlock: the node never requested blocks from ahead peers because it was in fc_initing, and it could never leave fc_initing because no blocks were imported. Fix the deadlock in two places: 1. Status-response handler: add an explicit fc_initing branch that requests the peer's head block when the peer is ahead of our anchor slot. This mirrors the behind_peers branch but uses head_slot for the comparison (finalized_slot is not yet meaningful in fc_initing). 2. Periodic sync refresh: every SYNC_STATUS_REFRESH_INTERVAL_SLOTS (8) slots, re-send our status to all connected peers when not synced. This recovers from the case where all peers were already connected before the fix was deployed, so no new connection event fires and the status-response handler would never be re-triggered. 
--- pkgs/node/src/constants.zig | 6 ++++ pkgs/node/src/node.zig | 62 ++++++++++++++++++++++++++++++++++++- pkgs/types/src/state.zig | 15 ++++----- 3 files changed, 75 insertions(+), 8 deletions(-) diff --git a/pkgs/node/src/constants.zig b/pkgs/node/src/constants.zig index 5a4c5c6bc..105b771c3 100644 --- a/pkgs/node/src/constants.zig +++ b/pkgs/node/src/constants.zig @@ -31,3 +31,9 @@ pub const MAX_FC_CHAIN_PRINT_DEPTH = 5; // with a different peer. 2 slots at 4s/slot is generous for latency while ensuring // stuck sync chains recover quickly. pub const RPC_REQUEST_TIMEOUT_SECONDS: i64 = 8; + +// How often to re-send status requests to all connected peers when not synced. +// Ensures that already-connected peers are probed again after a restart, and that +// a node stuck in fc_initing can recover without waiting for new peer connections. +// 8 slots = 32 seconds at 4s/slot. +pub const SYNC_STATUS_REFRESH_INTERVAL_SLOTS: u64 = 8; diff --git a/pkgs/node/src/node.zig b/pkgs/node/src/node.zig index ec7d91a43..97799aba3 100644 --- a/pkgs/node/src/node.zig +++ b/pkgs/node/src/node.zig @@ -785,7 +785,33 @@ pub const BeamNode = struct { }; } }, - .synced, .no_peers, .fc_initing => {}, + .fc_initing => { + // Forkchoice is still initializing (checkpoint-sync or DB restore). + // We need blocks to reach the first justified checkpoint and exit + // fc_initing. Without this branch the node deadlocks: it stays in + // fc_initing because no blocks arrive, and no blocks arrive because + // the sync code skips fc_initing. + // Treat this exactly like behind_peers: if the peer's head is ahead + // of our anchor, request their head block to start the parent chain. 
+ if (status_resp.head_slot > self.chain.forkChoice.head.slot) { + self.logger.info("peer {s}{f} is ahead during fc init (peer_head={d} > our_head={d}), requesting head block 0x{x}", .{ + status_ctx.peer_id, + self.node_registry.getNodeNameFromPeerId(status_ctx.peer_id), + status_resp.head_slot, + self.chain.forkChoice.head.slot, + &status_resp.head_root, + }); + const roots = [_]types.Root{status_resp.head_root}; + self.fetchBlockByRoots(&roots, 0) catch |err| { + self.logger.warn("failed to initiate sync from peer {s}{f} during fc init: {any}", .{ + status_ctx.peer_id, + self.node_registry.getNodeNameFromPeerId(status_ctx.peer_id), + err, + }); + }; + } + }, + .synced, .no_peers => {}, } }, else => { @@ -1122,6 +1148,18 @@ pub const BeamNode = struct { } const interval_in_slot = interval % constants.INTERVALS_PER_SLOT; + + // Periodically re-send status to all connected peers when not synced. + // This recovers from the case where peers were already connected when + // the node was in fc_initing and the status-exchange-triggered sync + // was skipped (now fixed, but existing connections need a re-probe). + if (interval_in_slot == 0 and slot % constants.SYNC_STATUS_REFRESH_INTERVAL_SLOTS == 0) { + switch (self.chain.getSyncStatus()) { + .fc_initing, .behind_peers => self.refreshSyncFromPeers(), + .synced, .no_peers => {}, + } + } + if (interval_in_slot == 2) { if (self.chain.maybeAggregateCommitteeSignaturesOnInterval(interval) catch |e| { self.logger.err("error producing aggregations at slot={d} interval={d}: {any}", .{ slot, interval, e }); @@ -1139,6 +1177,28 @@ pub const BeamNode = struct { self.last_interval = itime_intervals; } + /// Re-send our status to every connected peer. + /// + /// Called periodically when the node is not yet synced so that peers + /// already connected before the sync mechanism became aware of them + /// (e.g., after a restart or while stuck in fc_initing) get another + /// chance to report their head and trigger block fetching. 
+ fn refreshSyncFromPeers(self: *Self) void { + const status = self.chain.getStatus(); + const handler = self.getReqRespResponseHandler(); + var it = self.network.connected_peers.iterator(); + while (it.next()) |entry| { + const peer_id = entry.key_ptr.*; + _ = self.network.sendStatusToPeer(peer_id, status, handler) catch |err| { + self.logger.warn("failed to refresh status to peer {s}{f}: {any}", .{ + peer_id, + self.node_registry.getNodeNameFromPeerId(peer_id), + err, + }); + }; + } + } + fn sweepTimedOutRequests(self: *Self) void { const current_time = std.time.timestamp(); const timed_out = self.network.getTimedOutRequests(current_time, constants.RPC_REQUEST_TIMEOUT_SECONDS) catch |err| { diff --git a/pkgs/types/src/state.zig b/pkgs/types/src/state.zig index 62dbf23f8..59722aed6 100644 --- a/pkgs/types/src/state.zig +++ b/pkgs/types/src/state.zig @@ -430,7 +430,7 @@ pub const BeamState = struct { const has_known_root = has_correct_source_root and has_correct_target_root; const target_not_ahead = target_slot <= source_slot; - const is_target_justifiable = try utils.IsJustifiableSlot(self.latest_finalized.slot, target_slot); + const is_target_justifiable = utils.IsJustifiableSlot(self.latest_finalized.slot, target_slot) catch false; if (!is_source_justified or // not present in 3sf mini but once a target is justified no need to run loop @@ -802,7 +802,7 @@ fn makeBlock( }; } -test "process_attestations invalid justifiable slot returns error without panic" { +test "process_attestations silently skips pre-finalized target attestations" { var logger_config = zeam_utils.getTestLoggerConfig(); const logger = logger_config.logger(null); var state = try makeGenesisState(std.testing.allocator, 3); @@ -821,7 +821,6 @@ test "process_attestations invalid justifiable slot returns error without panic" const slot_0_root = try state.historical_block_hashes.get(0); const slot_1_root = try state.historical_block_hashes.get(1); - // Seed pending justifications so error unwind 
exercises map cleanup with allocated entries. var pending_roots = try JustificationRoots.init(std.testing.allocator); errdefer pending_roots.deinit(); try pending_roots.append(slot_1_root); @@ -839,6 +838,10 @@ test "process_attestations invalid justifiable slot returns error without panic" state.latest_finalized = .{ .root = slot_1_root, .slot = 1 }; + // Attestation whose target (slot=0) is before the finalized slot (slot=1). + // This is normal during post-checkpoint-sync catchup: a block may carry + // attestations referencing epoch boundaries from before the anchor. + // Such attestations must be silently skipped, not abort the block import. var att = try makeAggregatedAttestation( std.testing.allocator, &[_]usize{ 0, 1 }, @@ -859,10 +862,8 @@ test "process_attestations invalid justifiable slot returns error without panic" try attestations_list.append(att); att_transferred = true; - try std.testing.expectError( - StateTransitionError.InvalidJustifiableSlot, - state.process_attestations(std.testing.allocator, attestations_list, logger, null), - ); + // Must succeed: the pre-finalized attestation is skipped, not an error. 
+ try state.process_attestations(std.testing.allocator, attestations_list, logger, null); } test "justified_slots do not include finalized boundary" { From 5ac1439b7e5a775c74072836c5974fd5624f6d8c Mon Sep 17 00:00:00 2001 From: zclawz Date: Thu, 26 Mar 2026 01:48:26 +0530 Subject: [PATCH 23/24] refactor: update store to key by AttestationData (#656) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix: validateAttestationData refactor + validate aggregated attestation (closes #654) * refactor: update store - key signatures/payloads by AttestationData instead of SignatureKey (closes #636) Implements leanEthereum/leanSpec#436: - Replace SignatureKey(validator_id, data_root) -> StoredSignature map with AttestationData -> HashMap(ValidatorIndex, StoredSignature) (SignaturesMap) - Replace SignatureKey -> AggregatedPayloadsList map with AttestationData -> AggregatedPayloadsList (AggregatedPayloadsMap) - Remove attestation_data_by_root reverse-lookup map (no longer needed) - Remove SignatureKey type (replaced by AttestationData as key) - Update computeAggregatedSignatures Phase 1 to look up by AttestationData - Update computeAggregatedSignatures Phase 2 greedy set-cover to use per-data candidate list instead of per-validator lookup - Update pruneStaleAttestationData to iterate gossip_signatures directly and prune by target.slot (renamed to prunePayloadMapBySlot) - Update all call sites: forkchoice.zig, fork_choice_runner.zig, mock.zig, block_signatures_testing.zig * fix: resolve CI failure - update tests to use AttestationData as map key after store refactor * refactor: address PR #656 review comments - Remove 'Replaces the old...' and 'not per-validator' comments from block.zig (three sites); reviewer noted these transition comments are not needed in the final code. 
- Make GossipSignaturesInnerMap a proper struct wrapping AutoHashMap with init/put/get/deinit/iterator/count methods, so callers don't need to manage the inner map lifecycle and the initialization pattern (getOrPut + init if not found) stays consistent across all sites. - Remove validator_ids parameter from storeAggregatedPayload — payloads are now keyed by AttestationData so the param was unused. Update all call sites: chain.zig (×2), forkchoice.zig test helper, fork_choice_runner.zig. - Scope signatures_mutex in storeAggregatedPayload using a {} block with defer unlock, releasing the lock as soon as the map write completes. Consistent with the scoped-lock pattern used everywhere else in the file. * refactor: eliminate attestation_list from computeAggregatedSignatures; remove validateAttestation wrapper aggregateCommitteeSignaturesUnlocked / computeAggregatedSignatures: The old flow built a flat attestation list from gossip_signatures to pass as the first arg to computeAggregatedSignatures, which then re-grouped it by AttestationData. Since gossip_signatures is already keyed by AttestationData → GossipSignaturesInnerMap this round-trip was pure overhead. New flow: - computeAggregatedSignatures(validators, signatures_map, aggregated_payloads) drops the attestations_list parameter entirely. - Iterates the *union* of signatures_map and aggregated_payloads keys so groups that only exist in aggregated_payloads (individual sigs pruned but stored proof available) are still processed. - Phase 1 iterates the inner map directly (validator_id → stored_sig) with no intermediate grouping or root-index lookup. - Before Phase 2 greedy set-cover, seeds from proof participants not already in sigmap_available so proofs covering validators absent from signatures_map are still included. aggregateCommitteeSignaturesUnlocked (forkchoice.zig): - Drops the attestations ArrayList build loop entirely. - Calls computeAggregatedSignatures directly on gossip_signatures. 
chain.zig: - Removes getProposalAttestations() call (was only needed as the first arg); aggregation now reads gossip_signatures directly. - Removes validateAttestation() thin wrapper (review comment #r2955207656); callers should use validateAttestationData directly. All call sites updated: chain.zig, mock.zig, block_signatures_testing.zig. * fix: resolve CI failure - apply zig fmt to chain.zig (trailing blank line) * improve code and simplify aggregation * optimize block building * add checks in block production * refactor: move greedy proposal attestation logic to forkchoice.getProposalAttestations Move the fixed-point greedy proof selection loop from chain.produceBlock into forkchoice.getProposalAttestations, which now takes pre_state, slot, proposer_index, and parent_root as inputs. This encapsulates the attestation selection strategy inside the fork choice module, where it belongs. chain.produceBlock now simply calls forkChoice.getProposalAttestations and uses the returned AggregatedAttestations + AttestationSignatures. --------- Co-authored-by: zclawz Co-authored-by: zclawz Co-authored-by: Anshal Shukla <53994948+anshalshukla@users.noreply.github.com> Co-authored-by: anshalshukla Co-authored-by: zclawz --- pkgs/node/src/chain.zig | 198 ++- pkgs/node/src/forkchoice.zig | 483 ++++--- .../src/runner/fork_choice_runner.zig | 11 +- pkgs/state-transition/src/mock.zig | 64 +- pkgs/types/src/block.zig | 509 +++----- pkgs/types/src/block_signatures_testing.zig | 1118 ----------------- pkgs/types/src/lib.zig | 3 +- 7 files changed, 655 insertions(+), 1731 deletions(-) delete mode 100644 pkgs/types/src/block_signatures_testing.zig diff --git a/pkgs/node/src/chain.zig b/pkgs/node/src/chain.zig index 082a4fdf6..2bf2ee5a2 100644 --- a/pkgs/node/src/chain.zig +++ b/pkgs/node/src/chain.zig @@ -375,9 +375,6 @@ pub const BeamChain = struct { // This ensures the proposer builds on the latest proposal head derived // from known aggregated payloads. 
const proposal_head = try self.forkChoice.getProposalHead(opts.slot); - const attestations = try self.forkChoice.getProposalAttestations(); - defer self.allocator.free(attestations); - const parent_root = proposal_head.root; const pre_state = self.states.get(parent_root) orelse return BlockProductionError.MissingPreState; @@ -389,48 +386,30 @@ pub const BeamChain = struct { const post_state = post_state_opt.?; try types.sszClone(self.allocator, types.BeamState, pre_state.*, post_state); - // Use the two-phase aggregation algorithm: - // Phase 1: Collect individual signatures from gossip_signatures - // Phase 2: Fallback to latest_known_aggregated_payloads using greedy set-cover - var aggregation = try types.AggregatedAttestationsResult.init(self.allocator); + const building_timer = zeam_metrics.lean_pq_sig_aggregated_signatures_building_time_seconds.start(); + const proposal_atts = try self.forkChoice.getProposalAttestations(pre_state, opts.slot, opts.proposer_index, parent_root); + _ = building_timer.observe(); + + var agg_attestations = proposal_atts.attestations; var agg_att_cleanup = true; - var agg_sig_cleanup = true; errdefer if (agg_att_cleanup) { - for (aggregation.attestations.slice()) |*att| { - att.deinit(); - } - aggregation.attestations.deinit(); + for (agg_attestations.slice()) |*att| att.deinit(); + agg_attestations.deinit(); }; + + var attestation_signatures = proposal_atts.signatures; + var agg_sig_cleanup = true; errdefer if (agg_sig_cleanup) { - for (aggregation.attestation_signatures.slice()) |*sig| { - sig.deinit(); - } - aggregation.attestation_signatures.deinit(); + for (attestation_signatures.slice()) |*sig| sig.deinit(); + attestation_signatures.deinit(); }; - // Lock mutex only for the duration of computeAggregatedSignatures to avoid deadlock: - // forkChoice.onBlock/updateHead acquire forkChoice.mutex, while onSignedAttestation - // acquires mutex then signatures_mutex. 
Holding signatures_mutex across onBlock/updateHead - // would allow: (this thread: signatures_mutex -> mutex) vs (gossip: mutex -> signatures_mutex). - { - self.forkChoice.signatures_mutex.lock(); - defer self.forkChoice.signatures_mutex.unlock(); - - const building_timer = zeam_metrics.lean_pq_sig_aggregated_signatures_building_time_seconds.start(); - try aggregation.computeAggregatedSignatures( - attestations, - &pre_state.validators, - &self.forkChoice.gossip_signatures, - &self.forkChoice.latest_known_aggregated_payloads, - ); - _ = building_timer.observe(); - } // Record aggregated signature metrics - const num_agg_sigs = aggregation.attestation_signatures.len(); + const num_agg_sigs = attestation_signatures.len(); zeam_metrics.metrics.lean_pq_sig_aggregated_signatures_total.incrBy(num_agg_sigs); var total_attestations_in_agg: u64 = 0; - for (aggregation.attestations.constSlice()) |agg_att| { + for (agg_attestations.constSlice()) |agg_att| { const bits_len = agg_att.aggregation_bits.len(); for (0..bits_len) |i| { if (agg_att.aggregation_bits.get(i) catch false) { @@ -450,13 +429,12 @@ pub const BeamChain = struct { .state_root = undefined, .body = types.BeamBlockBody{ // .execution_payload_header = .{ .timestamp = timestamp }, - .attestations = aggregation.attestations, + .attestations = agg_attestations, }, }; agg_att_cleanup = false; // Ownership moved to block.body.attestations errdefer block.deinit(); - var attestation_signatures = aggregation.attestation_signatures; agg_sig_cleanup = false; // Ownership moved to attestation_signatures errdefer { for (attestation_signatures.slice()) |*sig_group| { @@ -822,8 +800,14 @@ pub const BeamChain = struct { self.onGossipAggregatedAttestation(signed_aggregation) catch |err| { zeam_metrics.metrics.lean_attestations_invalid_total.incr(.{ .source = "aggregation" }) catch {}; - self.logger.warn("gossip aggregation processing error: {any}", .{err}); - return .{}; + switch (err) { + // Propagate unknown block errors to 
node.zig for context-aware logging + error.UnknownHeadBlock, error.UnknownSourceBlock, error.UnknownTargetBlock => return err, + else => { + self.logger.warn("gossip aggregation processing error: {any}", .{err}); + return .{}; + }, + } }; zeam_metrics.metrics.lean_attestations_valid_total.incr(.{ .source = "aggregation" }) catch {}; return .{}; @@ -965,7 +949,7 @@ pub const BeamChain = struct { for (validator_indices.items, 0..) |vi, i| { validator_ids[i] = @intCast(vi); } - self.forkChoice.storeAggregatedPayload(validator_ids, &aggregated_attestation.data, signature_proof.*, true) catch |e| { + self.forkChoice.storeAggregatedPayload(&aggregated_attestation.data, signature_proof.*, true) catch |e| { self.logger.warn("failed to store aggregated payload for attestation index={d}: {any}", .{ index, e }); }; } @@ -1466,6 +1450,9 @@ pub const BeamChain = struct { } pub fn onGossipAggregatedAttestation(self: *Self, signedAggregation: types.SignedAggregatedAttestation) !void { + // Validate the attestation data first (same rules as individual gossip attestations) + try self.validateAttestationData(signedAggregation.data, false); + try self.verifyAggregatedAttestation(signedAggregation); var validator_indices = try types.aggregationBitsToValidatorIndices(&signedAggregation.proof.participants, self.allocator); @@ -1490,7 +1477,7 @@ pub const BeamChain = struct { }; } - try self.forkChoice.storeAggregatedPayload(validator_ids, &signedAggregation.data, signedAggregation.proof, false); + try self.forkChoice.storeAggregatedPayload(&signedAggregation.data, signedAggregation.proof, false); } fn verifyAggregatedAttestation(self: *Self, signedAggregation: types.SignedAggregatedAttestation) !void { @@ -2432,6 +2419,131 @@ test "attestation processing - valid block attestation" { try beam_chain.onGossipAttestation(gossip_attestation); // Verify the attestation data was recorded for aggregation - const data_root = try valid_attestation.message.sszRoot(allocator); - try 
std.testing.expect(beam_chain.forkChoice.attestation_data_by_root.get(data_root) != null); + try std.testing.expect(beam_chain.forkChoice.gossip_signatures.get(valid_attestation.message) != null); +} + +test "produceBlock - greedy selection by latest slot is suboptimal when attestation references unseen block" { + // Demonstrates that selecting attestation_data entries by latest slot is not the + // best strategy for block production. An attestation_data with a higher slot may + // reference a block on a different fork that this node has never seen locally. + // The STF will skip such attestations (has_known_root check in process_attestations), + // wasting block space. Lower-slot attestations referencing locally-known blocks + // are the ones that actually contribute to justification. + var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator); + defer arena_allocator.deinit(); + const allocator = arena_allocator.allocator(); + + const mock_chain = try stf.genMockChain(allocator, 3, null); + const spec_name = try allocator.dupe(u8, "beamdev"); + const chain_config = configs.ChainConfig{ + .id = configs.Chain.custom, + .genesis = mock_chain.genesis_config, + .spec = .{ + .preset = params.Preset.mainnet, + .name = spec_name, + .attestation_committee_count = 1, + }, + }; + var beam_state = mock_chain.genesis_state; + var zeam_logger_config = zeam_utils.getTestLoggerConfig(); + + var tmp_dir = std.testing.tmpDir(.{}); + defer tmp_dir.cleanup(); + const data_dir = try tmp_dir.dir.realpathAlloc(allocator, "."); + defer allocator.free(data_dir); + + var db = try database.Db.open(allocator, zeam_logger_config.logger(.database_test), data_dir); + defer db.deinit(); + + const connected_peers = try allocator.create(std.StringHashMap(PeerInfo)); + connected_peers.* = std.StringHashMap(PeerInfo).init(allocator); + + const test_registry = try allocator.create(NodeNameRegistry); + defer allocator.destroy(test_registry); + test_registry.* = 
NodeNameRegistry.init(allocator); + defer test_registry.deinit(); + + var beam_chain = try BeamChain.init(allocator, ChainOpts{ .config = chain_config, .anchorState = &beam_state, .nodeId = 0, .logger_config = &zeam_logger_config, .db = db, .node_registry = test_registry }, connected_peers); + defer beam_chain.deinit(); + + // Process blocks at slots 1 and 2 + for (1..mock_chain.blocks.len) |i| { + const signed_block = mock_chain.blocks[i]; + const block = signed_block.message.block; + try beam_chain.forkChoice.onInterval(block.slot * constants.INTERVALS_PER_SLOT, false); + const missing_roots = try beam_chain.onBlock(signed_block, .{ .pruneForkchoice = false }); + allocator.free(missing_roots); + } + + // After processing blocks 0-2, latest_justified should be at slot 1. + const justified_root = mock_chain.latestJustified[2].root; + + // att_data_unseen: higher slot, but references a block on a fork we haven't seen. + // A greedy-by-slot approach would prefer this over lower-slot alternatives. + const unknown_root = [_]u8{0xAB} ** 32; + const att_data_unseen = types.AttestationData{ + .slot = 2, + .head = .{ .root = unknown_root, .slot = 2 }, + .target = .{ .root = unknown_root, .slot = 2 }, + .source = .{ .root = justified_root, .slot = 1 }, + }; + + // att_data_known: references a locally-known block at slot 2. 
+ const att_data_known = types.AttestationData{ + .slot = 1, + .head = .{ .root = mock_chain.blockRoots[2], .slot = 2 }, + .target = .{ .root = mock_chain.blockRoots[2], .slot = 2 }, + .source = .{ .root = justified_root, .slot = 1 }, + }; + + // Create mock proofs with all 4 validators participating + var proof_unseen = try types.AggregatedSignatureProof.init(allocator); + for (0..4) |i| { + try types.aggregationBitsSet(&proof_unseen.participants, i, true); + } + try beam_chain.forkChoice.storeAggregatedPayload(&att_data_unseen, proof_unseen, true); + + var proof_known = try types.AggregatedSignatureProof.init(allocator); + for (0..4) |i| { + try types.aggregationBitsSet(&proof_known.participants, i, true); + } + try beam_chain.forkChoice.storeAggregatedPayload(&att_data_known, proof_known, true); + + // Produce block at slot 3 (proposer_index = 3 % 4 = 3) + const proposal_slot: types.Slot = 3; + const num_validators: u64 = @intCast(mock_chain.genesis_config.numValidators()); + var produced = try beam_chain.produceBlock(.{ + .slot = proposal_slot, + .proposer_index = proposal_slot % num_validators, + }); + defer produced.deinit(); + + // The block should contain attestation entries for both att_data since both + // have source matching the justified checkpoint. + const block_attestations = produced.block.body.attestations.constSlice(); + + // However, after STF processing, only the attestation referencing the known + // block contributes to justification. The unseen-fork attestation is silently + // skipped by process_attestations (has_known_root check). + // + // This demonstrates why greedy-by-latest-slot is suboptimal: if we had only + // selected the highest-slot attestation (att_data_unseen at slot=2), the block + // would contribute zero attestation weight. The lower-slot attestation + // (att_data_known at slot=1) is the one that actually matters. 
+ const post_state = beam_chain.states.get(produced.blockRoot) orelse @panic("post state should exist"); + try std.testing.expect(post_state.latest_justified.slot >= 1); + + // Count how many attestation entries reference the unseen vs known block + var unseen_count: usize = 0; + var known_count: usize = 0; + for (block_attestations) |att| { + if (std.mem.eql(u8, &att.data.target.root, &unknown_root)) { + unseen_count += 1; + } else if (std.mem.eql(u8, &att.data.target.root, &mock_chain.blockRoots[2])) { + known_count += 1; + } + } + // Only the known attestation is included in the block + try std.testing.expect(unseen_count == 0); + try std.testing.expect(known_count > 0); } diff --git a/pkgs/node/src/forkchoice.zig b/pkgs/node/src/forkchoice.zig index bbe2aff3c..c806df124 100644 --- a/pkgs/node/src/forkchoice.zig +++ b/pkgs/node/src/forkchoice.zig @@ -264,7 +264,6 @@ pub const ForkChoiceParams = struct { }; // Use shared signature map types from types package -const SignatureKey = types.SignatureKey; const StoredSignature = types.StoredSignature; const SignaturesMap = types.SignaturesMap; const StoredAggregatedPayload = types.StoredAggregatedPayload; @@ -293,11 +292,9 @@ pub const ForkChoice = struct { logger: zeam_utils.ModuleLogger, // Thread-safe access protection mutex: Thread.RwLock, - // Per-validator XMSS signatures learned from gossip, keyed by (validator_id, attestation_data_root) + // Per-validator XMSS signatures learned from gossip, keyed by AttestationData. + // Each AttestationData maps to a per-validator-id inner map of signatures. gossip_signatures: SignaturesMap, - // Attestation data indexed by data root, used to reconstruct attestations from payloads. - // Entries are pruned once their target checkpoint is at or before finalization. - attestation_data_by_root: std.AutoHashMap(types.Root, types.AttestationData), // Aggregated signature proofs pending processing. // These payloads are "new" and migrate to known payloads via interval ticks. 
latest_new_aggregated_payloads: AggregatedPayloadsMap, @@ -360,7 +357,6 @@ pub const ForkChoice = struct { const attestations = std.AutoHashMap(usize, AttestationTracker).init(allocator); const deltas: std.ArrayList(isize) = .empty; const gossip_signatures = SignaturesMap.init(allocator); - const attestation_data_by_root = std.AutoHashMap(types.Root, types.AttestationData).init(allocator); const latest_new_aggregated_payloads = AggregatedPayloadsMap.init(allocator); const latest_known_aggregated_payloads = AggregatedPayloadsMap.init(allocator); @@ -377,7 +373,6 @@ pub const ForkChoice = struct { .logger = opts.logger, .mutex = Thread.RwLock{}, .gossip_signatures = gossip_signatures, - .attestation_data_by_root = attestation_data_by_root, .latest_new_aggregated_payloads = latest_new_aggregated_payloads, .latest_known_aggregated_payloads = latest_known_aggregated_payloads, .signatures_mutex = .{}, @@ -473,7 +468,6 @@ pub const ForkChoice = struct { self.signatures_mutex.lock(); defer self.signatures_mutex.unlock(); self.gossip_signatures.deinit(); - self.attestation_data_by_root.deinit(); // Deinit each list in the aggregated payloads maps var it_known = self.latest_known_aggregated_payloads.iterator(); @@ -937,31 +931,174 @@ pub const ForkChoice = struct { } // Internal unlocked version - assumes caller holds lock - fn getProposalAttestationsUnlocked(self: *Self) ![]types.Attestation { - var included_attestations: std.ArrayList(types.Attestation) = .empty; - - const latest_justified = self.fcStore.latest_justified; - - // TODO naive strategy to include all attestations that are consistent with the latest justified - // replace by the other mini 3sf simple strategy to loop and see if justification happens and - // till no further attestations can be added - var att_iter = self.attestations.iterator(); - while (att_iter.next()) |entry| { - const validator_id = entry.key_ptr.*; - const attestation_data = (entry.value_ptr.latestKnown orelse 
ProtoAttestation{}).attestation_data; - - if (attestation_data) |att_data| { - if (std.mem.eql(u8, &latest_justified.root, &att_data.source.root)) { - const attestation = types.Attestation{ - .data = att_data, - .validator_id = validator_id, - }; - try included_attestations.append(self.allocator, attestation); + pub const ProposalAttestationsResult = struct { + attestations: types.AggregatedAttestations, + signatures: types.AttestationSignatures, + }; + + fn getProposalAttestationsUnlocked( + self: *Self, + pre_state: *const types.BeamState, + slot: types.Slot, + proposer_index: types.ValidatorIndex, + parent_root: [32]u8, + ) !ProposalAttestationsResult { + var agg_attestations = try types.AggregatedAttestations.init(self.allocator); + var agg_att_cleanup = true; + errdefer if (agg_att_cleanup) { + for (agg_attestations.slice()) |*att| att.deinit(); + agg_attestations.deinit(); + }; + + var attestation_signatures = try types.AttestationSignatures.init(self.allocator); + var agg_sig_cleanup = true; + errdefer if (agg_sig_cleanup) { + for (attestation_signatures.slice()) |*sig| sig.deinit(); + attestation_signatures.deinit(); + }; + + // Fixed-point attestation collection with greedy proof selection. + // + // For the current latest_justified checkpoint, find matching attestation_data + // entries in latest_known_aggregated_payloads and greedily select proofs that + // maximize new validator coverage. Then apply STF to check if justification + // changed. If it did, look for entries matching the new justified checkpoint + // and repeat. If no matching entries exist or justification did not change, + // block production is done. 
+ var current_justified_root = pre_state.latest_justified.root; + var processed_att_data = std.AutoHashMap(types.AttestationData, void).init(self.allocator); + defer processed_att_data.deinit(); + + while (true) { + // Find all attestation_data entries whose source matches the current justified checkpoint + // and greedily select proofs maximizing new validator coverage for each. + // Collect entries and sort by target slot for deterministic processing order. + const MapEntry = struct { + att_data: *types.AttestationData, + payloads: *types.AggregatedPayloadsList, + }; + var sorted_entries: std.ArrayList(MapEntry) = .empty; + defer sorted_entries.deinit(self.allocator); + + var payload_it = self.latest_known_aggregated_payloads.iterator(); + while (payload_it.next()) |entry| { + if (!std.mem.eql(u8, ¤t_justified_root, &entry.key_ptr.source.root)) continue; + if (!self.protoArray.indices.contains(entry.key_ptr.head.root)) continue; + if (processed_att_data.contains(entry.key_ptr.*)) continue; + try sorted_entries.append(self.allocator, .{ .att_data = entry.key_ptr, .payloads = entry.value_ptr }); + } + + std.mem.sort(MapEntry, sorted_entries.items, {}, struct { + fn lessThan(_: void, a: MapEntry, b: MapEntry) bool { + return a.att_data.target.slot < b.att_data.target.slot; + } + }.lessThan); + + const found_entries = sorted_entries.items.len > 0; + + for (sorted_entries.items) |map_entry| { + try processed_att_data.put(map_entry.att_data.*, {}); + + const att_data = map_entry.att_data.*; + const payloads = map_entry.payloads; + + // Greedy proof selection: each iteration picks the proof covering + // the most uncovered validators until all are covered. 
+ var covered = try std.DynamicBitSet.initEmpty(self.allocator, 0); + defer covered.deinit(); + + while (true) { + var best_proof: ?*const types.AggregatedSignatureProof = null; + var best_new_coverage: usize = 0; + + for (payloads.items) |*stored| { + var new_coverage: usize = 0; + for (0..stored.proof.participants.len()) |i| { + if (stored.proof.participants.get(i) catch false) { + if (i >= covered.capacity() or !covered.isSet(i)) { + new_coverage += 1; + } + } + } + if (new_coverage > best_new_coverage) { + best_new_coverage = new_coverage; + best_proof = &stored.proof; + } + } + + if (best_proof == null or best_new_coverage == 0) break; + + var cloned_proof: types.AggregatedSignatureProof = undefined; + try types.sszClone(self.allocator, types.AggregatedSignatureProof, best_proof.?.*, &cloned_proof); + errdefer cloned_proof.deinit(); + + var att_bits = try types.AggregationBits.init(self.allocator); + errdefer att_bits.deinit(); + + for (0..cloned_proof.participants.len()) |i| { + if (cloned_proof.participants.get(i) catch false) { + try types.aggregationBitsSet(&att_bits, i, true); + if (i >= covered.capacity()) { + try covered.resize(i + 1, false); + } + covered.set(i); + } + } + + try agg_attestations.append(.{ .aggregation_bits = att_bits, .data = att_data }); + try attestation_signatures.append(cloned_proof); + } + } + + if (!found_entries) break; + + // Build candidate block with all accumulated attestations and apply STF + // to check if justification changed. 
+ var candidate_atts = try types.AggregatedAttestations.init(self.allocator); + defer { + for (candidate_atts.slice()) |*att| att.deinit(); + candidate_atts.deinit(); + } + + for (agg_attestations.constSlice()) |agg_att| { + var cloned_bits = try types.AggregationBits.init(self.allocator); + errdefer cloned_bits.deinit(); + for (0..agg_att.aggregation_bits.len()) |i| { + if (agg_att.aggregation_bits.get(i) catch false) { + try types.aggregationBitsSet(&cloned_bits, i, true); + } } + try candidate_atts.append(.{ .aggregation_bits = cloned_bits, .data = agg_att.data }); + } + + const candidate_block = types.BeamBlock{ + .slot = slot, + .proposer_index = proposer_index, + .parent_root = parent_root, + .state_root = std.mem.zeroes([32]u8), + .body = .{ .attestations = candidate_atts }, + }; + + var candidate_state: types.BeamState = undefined; + try types.sszClone(self.allocator, types.BeamState, pre_state.*, &candidate_state); + defer candidate_state.deinit(); + + try candidate_state.process_slots(self.allocator, slot, self.logger); + try candidate_state.process_block(self.allocator, candidate_block, self.logger, null); + + if (!std.mem.eql(u8, &candidate_state.latest_justified.root, ¤t_justified_root)) { + // Justification changed - look for entries matching the new checkpoint + current_justified_root = candidate_state.latest_justified.root; + continue; } + + // Justification unchanged or no new entries - block production done + break; } - return included_attestations.toOwnedSlice(self.allocator); + agg_att_cleanup = false; + agg_sig_cleanup = false; + return .{ .attestations = agg_attestations, .signatures = attestation_signatures }; } // Internal unlocked version - assumes caller holds lock @@ -1153,20 +1290,12 @@ pub const ForkChoice = struct { const validator_id = signed_attestation.validator_id; const attestation_slot = attestation_data.slot; - // Store attestation data by root for later aggregation - const data_root = try 
attestation_data.sszRoot(self.allocator); var gossip_signatures_count: usize = 0; { self.signatures_mutex.lock(); defer self.signatures_mutex.unlock(); - try self.attestation_data_by_root.put(data_root, attestation_data); - // Store the gossip signature for later aggregation - const sig_key = SignatureKey{ - .validator_id = validator_id, - .data_root = data_root, - }; - try self.gossip_signatures.put(sig_key, .{ + try self.gossip_signatures.addSignature(attestation_data, validator_id, .{ .slot = attestation_slot, .signature = signed_attestation.signature, }); @@ -1233,43 +1362,32 @@ pub const ForkChoice = struct { /// For gossip attestations, also updates fork choice attestation trackers. pub fn storeAggregatedPayload( self: *Self, - validator_ids: []const types.ValidatorIndex, attestation_data: *const types.AttestationData, proof: types.AggregatedSignatureProof, is_from_block: bool, ) !void { - const data_root = try attestation_data.sszRoot(self.allocator); + var cloned_proof: types.AggregatedSignatureProof = undefined; + try types.sszClone(self.allocator, types.AggregatedSignatureProof, proof, &cloned_proof); + errdefer cloned_proof.deinit(); { self.signatures_mutex.lock(); defer self.signatures_mutex.unlock(); - try self.attestation_data_by_root.put(data_root, attestation_data.*); - const target_map = if (is_from_block) &self.latest_known_aggregated_payloads else &self.latest_new_aggregated_payloads; - for (validator_ids) |validator_id| { - const sig_key = SignatureKey{ - .validator_id = validator_id, - .data_root = data_root, - }; - const gop = try target_map.getOrPut(sig_key); - if (!gop.found_existing) { - gop.value_ptr.* = .empty; - } - - var cloned_proof: types.AggregatedSignatureProof = undefined; - try types.sszClone(self.allocator, types.AggregatedSignatureProof, proof, &cloned_proof); - errdefer cloned_proof.deinit(); - - try gop.value_ptr.append(self.allocator, .{ - .slot = attestation_data.slot, - .proof = cloned_proof, - }); + const gop = try 
target_map.getOrPut(attestation_data.*); + if (!gop.found_existing) { + gop.value_ptr.* = .empty; } + + try gop.value_ptr.append(self.allocator, .{ + .slot = attestation_data.slot, + .proof = cloned_proof, + }); } } @@ -1279,14 +1397,11 @@ pub const ForkChoice = struct { const state = state_opt orelse return try self.allocator.alloc(types.SignedAggregatedAttestation, 0); - var attestations: std.ArrayList(types.Attestation) = .{}; - defer attestations.deinit(self.allocator); - // Capture counts for metrics update outside lock scope var new_payloads_count: usize = 0; var gossip_sigs_count: usize = 0; - var results: std.ArrayList(types.SignedAggregatedAttestation) = .{}; + var results: std.ArrayList(types.SignedAggregatedAttestation) = .empty; errdefer { for (results.items) |*signed| { signed.deinit(); @@ -1298,92 +1413,53 @@ pub const ForkChoice = struct { self.signatures_mutex.lock(); defer self.signatures_mutex.unlock(); - var sig_it = self.gossip_signatures.iterator(); - while (sig_it.next()) |entry| { - const sig_key = entry.key_ptr.*; - const attestation_data = self.attestation_data_by_root.get(sig_key.data_root) orelse continue; - try attestations.append(self.allocator, .{ - .validator_id = sig_key.validator_id, - .data = attestation_data, - }); - } + // Collect keys first to avoid modifying map during iteration + var att_data_keys: std.ArrayList(types.AttestationData) = .empty; + defer att_data_keys.deinit(self.allocator); - var aggregation = try types.AggregatedAttestationsResult.init(self.allocator); - var agg_att_cleanup = true; - var agg_sig_cleanup = true; - errdefer if (agg_att_cleanup) { - for (aggregation.attestations.slice()) |*att| { - att.deinit(); - } - aggregation.attestations.deinit(); - }; - errdefer if (agg_sig_cleanup) { - for (aggregation.attestation_signatures.slice()) |*sig| { - sig.deinit(); + { + var it = self.gossip_signatures.iterator(); + while (it.next()) |entry| { + try att_data_keys.append(self.allocator, entry.key_ptr.*); } - 
aggregation.attestation_signatures.deinit(); - }; - - try aggregation.computeAggregatedSignatures( - attestations.items, - &state.validators, - &self.gossip_signatures, - null, - ); - - const agg_attestations = aggregation.attestations.constSlice(); - const agg_signatures = aggregation.attestation_signatures.constSlice(); - - for (agg_attestations, 0..) |agg_att, index| { - const proof = agg_signatures[index]; - const data_root = try agg_att.data.sszRoot(self.allocator); + } - try self.attestation_data_by_root.put(data_root, agg_att.data); + for (att_data_keys.items) |att_data| { + const inner_map_ptr = self.gossip_signatures.getPtr(att_data) orelse continue; - var validator_indices = try types.aggregationBitsToValidatorIndices(&proof.participants, self.allocator); - defer validator_indices.deinit(self.allocator); + var proof = try types.aggregateInnerMap(self.allocator, inner_map_ptr, att_data, &state.validators); + errdefer proof.deinit(); - for (validator_indices.items) |validator_index| { - const sig_key = SignatureKey{ - .validator_id = @intCast(validator_index), - .data_root = data_root, - }; - const gop = try self.latest_new_aggregated_payloads.getOrPut(sig_key); - if (!gop.found_existing) { - gop.value_ptr.* = .empty; - } + // Store proof keyed by AttestationData + const gop = try self.latest_new_aggregated_payloads.getOrPut(att_data); + if (!gop.found_existing) { + gop.value_ptr.* = .empty; + } + { var cloned_proof: types.AggregatedSignatureProof = undefined; try types.sszClone(self.allocator, types.AggregatedSignatureProof, proof, &cloned_proof); errdefer cloned_proof.deinit(); try gop.value_ptr.append(self.allocator, .{ - .slot = agg_att.data.slot, + .slot = att_data.slot, .proof = cloned_proof, }); - // Align with leanSpec: once this signature is represented by an aggregated - // payload, remove it from the gossip signature map to prevent re-aggregation. 
- _ = self.gossip_signatures.remove(sig_key); } + // Align with leanSpec: once signatures for this data are represented by an + // aggregated payload, remove the whole inner map to prevent re-aggregation. + self.gossip_signatures.removeAndDeinit(att_data); + var output_proof: types.AggregatedSignatureProof = undefined; try types.sszClone(self.allocator, types.AggregatedSignatureProof, proof, &output_proof); errdefer output_proof.deinit(); try results.append(self.allocator, .{ - .data = agg_att.data, + .data = att_data, .proof = output_proof, }); - } - agg_att_cleanup = false; - agg_sig_cleanup = false; - for (aggregation.attestations.slice()) |*att| { - att.deinit(); + proof.deinit(); } - aggregation.attestations.deinit(); - for (aggregation.attestation_signatures.slice()) |*sig| { - sig.deinit(); - } - aggregation.attestation_signatures.deinit(); // Capture counts before lock is released new_payloads_count = self.latest_new_aggregated_payloads.count(); @@ -1418,45 +1494,27 @@ pub const ForkChoice = struct { self.signatures_mutex.lock(); defer self.signatures_mutex.unlock(); - var stale_roots = std.AutoHashMap(types.Root, void).init(self.allocator); - defer stale_roots.deinit(); - - var data_it = self.attestation_data_by_root.iterator(); - while (data_it.next()) |entry| { - if (entry.value_ptr.target.slot <= finalized_slot) { - try stale_roots.put(entry.key_ptr.*, {}); - } - } - - if (stale_roots.count() == 0) return; - - // Remove stale attestation data entries. - var stale_it = stale_roots.iterator(); - while (stale_it.next()) |entry| { - _ = self.attestation_data_by_root.remove(entry.key_ptr.*); - } - - // Remove gossip signatures tied to stale data roots. 
- var gossip_keys_to_remove: std.ArrayList(SignatureKey) = .empty; + // Collect stale AttestationData keys from gossip_signatures (target.slot <= finalized) + var gossip_keys_to_remove: std.ArrayList(types.AttestationData) = .empty; defer gossip_keys_to_remove.deinit(self.allocator); var gossip_it = self.gossip_signatures.iterator(); while (gossip_it.next()) |entry| { - if (stale_roots.contains(entry.key_ptr.data_root)) { + if (entry.key_ptr.target.slot <= finalized_slot) { try gossip_keys_to_remove.append(self.allocator, entry.key_ptr.*); } } - for (gossip_keys_to_remove.items) |sig_key| { - _ = self.gossip_signatures.remove(sig_key); + + for (gossip_keys_to_remove.items) |data| { + self.gossip_signatures.removeAndDeinit(data); } - const removed_known = try prunePayloadMapByRoots(self.allocator, &self.latest_known_aggregated_payloads, &stale_roots); - const removed_new = try prunePayloadMapByRoots(self.allocator, &self.latest_new_aggregated_payloads, &stale_roots); + const removed_known = try prunePayloadMapBySlot(self.allocator, &self.latest_known_aggregated_payloads, finalized_slot); + const removed_new = try prunePayloadMapBySlot(self.allocator, &self.latest_new_aggregated_payloads, finalized_slot); self.logger.debug( - "pruned stale attestation data: roots={d} gossip={d} payloads_known={d} payloads_new={d} finalized_slot={d}", + "pruned stale attestation data: gossip={d} payloads_known={d} payloads_new={d} finalized_slot={d}", .{ - stale_roots.count(), gossip_keys_to_remove.items.len, removed_known, removed_new, @@ -1465,18 +1523,18 @@ pub const ForkChoice = struct { ); } - fn prunePayloadMapByRoots( + fn prunePayloadMapBySlot( allocator: Allocator, payloads: *AggregatedPayloadsMap, - stale_roots: *const std.AutoHashMap(types.Root, void), + finalized_slot: types.Slot, ) !usize { - var keys_to_remove: std.ArrayList(SignatureKey) = .{}; + var keys_to_remove: std.ArrayList(types.AttestationData) = .{}; defer keys_to_remove.deinit(allocator); var removed_total: 
usize = 0; var it = payloads.iterator(); while (it.next()) |entry| { - if (!stale_roots.contains(entry.key_ptr.data_root)) continue; + if (entry.key_ptr.target.slot > finalized_slot) continue; for (entry.value_ptr.items) |*stored| { stored.proof.deinit(); @@ -1485,8 +1543,8 @@ pub const ForkChoice = struct { try keys_to_remove.append(allocator, entry.key_ptr.*); } - for (keys_to_remove.items) |sig_key| { - if (payloads.fetchRemove(sig_key)) |kv| { + for (keys_to_remove.items) |data| { + if (payloads.fetchRemove(data)) |kv| { var mutable_val = kv.value; mutable_val.deinit(allocator); } @@ -1627,10 +1685,16 @@ pub const ForkChoice = struct { // READ-ONLY API - SHARED LOCK - pub fn getProposalAttestations(self: *Self) ![]types.Attestation { + pub fn getProposalAttestations( + self: *Self, + pre_state: *const types.BeamState, + slot: types.Slot, + proposer_index: types.ValidatorIndex, + parent_root: [32]u8, + ) !ProposalAttestationsResult { self.mutex.lockShared(); defer self.mutex.unlockShared(); - return self.getProposalAttestationsUnlocked(); + return self.getProposalAttestationsUnlocked(pre_state, slot, proposer_index, parent_root); } pub fn getAttestationTarget(self: *Self) !types.Checkpoint { @@ -1858,7 +1922,7 @@ test "forkchoice block tree" { test "aggregateCommitteeSignatures prunes aggregated gossip signatures" { const allocator = std.testing.allocator; - const validator_count: usize = 4; + const validator_count: usize = 8; const num_blocks: usize = 1; var key_manager = try keymanager.getTestKeyManager(allocator, validator_count, num_blocks); @@ -1915,19 +1979,32 @@ test "aggregateCommitteeSignatures prunes aggregated gossip signatures" { .slot = 0, }, }; - const attestation = types.Attestation{ - .validator_id = 0, - .data = attestation_data, - }; - const signature = try key_manager.signAttestation(&attestation, allocator); + var found_unsorted = false; + for (0..validator_count) |validator_id| { + const attestation = types.Attestation{ + .validator_id = 
validator_id, + .data = attestation_data, + }; + const signature = try key_manager.signAttestation(&attestation, allocator); - try fork_choice.onSignedAttestation(.{ - .validator_id = 0, - .message = attestation_data, - .signature = signature, - }); + try fork_choice.onSignedAttestation(.{ + .validator_id = validator_id, + .message = attestation_data, + .signature = signature, + }); + + if (!found_unsorted) { + const inner_map_ptr = fork_choice.gossip_signatures.getPtr(attestation_data) orelse continue; + const iter_order = try collectInnerMapOrder(allocator, inner_map_ptr); + defer allocator.free(iter_order); + if (iter_order.len >= 2 and !isSortedAsc(iter_order)) { + found_unsorted = true; + break; + } + } + } + try std.testing.expect(found_unsorted); - const data_root = try attestation_data.sszRoot(allocator); const aggregations = try fork_choice.aggregateCommitteeSignatures(&mock_chain.genesis_state); defer { for (aggregations) |*signed_aggregation| { @@ -1938,10 +2015,48 @@ test "aggregateCommitteeSignatures prunes aggregated gossip signatures" { try std.testing.expectEqual(@as(usize, 1), aggregations.len); try std.testing.expectEqual(@as(usize, 0), fork_choice.gossip_signatures.count()); - try std.testing.expect(fork_choice.latest_new_aggregated_payloads.get(.{ - .validator_id = 0, - .data_root = data_root, - }) != null); + try std.testing.expect(fork_choice.latest_new_aggregated_payloads.get(attestation_data) != null); + + const aggregation = aggregations[0]; + var validator_indices = try types.aggregationBitsToValidatorIndices(&aggregation.proof.participants, allocator); + defer validator_indices.deinit(allocator); + + const xmss_mod = @import("@zeam/xmss"); + const pk_handles = try allocator.alloc(*const xmss_mod.HashSigPublicKey, validator_indices.items.len); + defer allocator.free(pk_handles); + + for (validator_indices.items, 0..) 
|validator_index, i| { + pk_handles[i] = try key_manager.getPublicKeyHandle(validator_index); + } + + var message_hash: [32]u8 = undefined; + try zeam_utils.hashTreeRoot(types.AttestationData, aggregation.data, &message_hash, allocator); + try aggregation.proof.verify(pk_handles, &message_hash, aggregation.data.slot); +} + +fn collectInnerMapOrder( + allocator: Allocator, + inner_map: *const types.SignaturesMap.InnerMap, +) ![]usize { + const len = inner_map.count(); + const order = try allocator.alloc(usize, len); + var idx: usize = 0; + var it = inner_map.iterator(); + while (it.next()) |entry| { + order[idx] = @intCast(entry.key_ptr.*); + idx += 1; + } + return order; +} + +fn isSortedAsc(values: []const usize) bool { + if (values.len <= 1) return true; + var prev = values[0]; + for (values[1..]) |value| { + if (value < prev) return false; + prev = value; + } + return true; } // Helper function to create a deterministic test root filled with a specific byte @@ -2103,7 +2218,6 @@ test "getCanonicalAncestorAtDepth and getCanonicalityAnalysis" { .logger = module_logger, .mutex = Thread.RwLock{}, .gossip_signatures = SignaturesMap.init(allocator), - .attestation_data_by_root = std.AutoHashMap(types.Root, types.AttestationData).init(allocator), .latest_new_aggregated_payloads = AggregatedPayloadsMap.init(allocator), .latest_known_aggregated_payloads = AggregatedPayloadsMap.init(allocator), .signatures_mutex = std.Thread.Mutex{}, @@ -2112,7 +2226,6 @@ test "getCanonicalAncestorAtDepth and getCanonicalityAnalysis" { defer fork_choice.attestations.deinit(); defer fork_choice.deltas.deinit(fork_choice.allocator); defer fork_choice.gossip_signatures.deinit(); - defer fork_choice.attestation_data_by_root.deinit(); defer deinitAggregatedPayloadsMap(allocator, &fork_choice.latest_known_aggregated_payloads); defer deinitAggregatedPayloadsMap(allocator, &fork_choice.latest_new_aggregated_payloads); @@ -2378,8 +2491,7 @@ fn stageAggregatedAttestation( try 
types.aggregationBitsSet(&proof.participants, @intCast(signed_attestation.validator_id), true); - const validator_ids = [_]types.ValidatorIndex{signed_attestation.validator_id}; - try fork_choice.storeAggregatedPayload(&validator_ids, &signed_attestation.message, proof, false); + try fork_choice.storeAggregatedPayload(&signed_attestation.message, proof, false); } // Rebase tests build ForkChoice structs in helper functions that outlive the helper scope. @@ -2456,7 +2568,6 @@ fn buildTestTreeWithMockChain(allocator: Allocator, mock_chain: anytype) !struct .logger = module_logger, .mutex = Thread.RwLock{}, .gossip_signatures = SignaturesMap.init(allocator), - .attestation_data_by_root = std.AutoHashMap(types.Root, types.AttestationData).init(allocator), .latest_new_aggregated_payloads = AggregatedPayloadsMap.init(allocator), .latest_known_aggregated_payloads = AggregatedPayloadsMap.init(allocator), .signatures_mutex = std.Thread.Mutex{}, @@ -2493,7 +2604,6 @@ const RebaseTestContext = struct { errdefer test_data.fork_choice.attestations.deinit(); errdefer test_data.fork_choice.deltas.deinit(test_data.fork_choice.allocator); errdefer test_data.fork_choice.gossip_signatures.deinit(); - errdefer test_data.fork_choice.attestation_data_by_root.deinit(); errdefer test_data.fork_choice.latest_known_aggregated_payloads.deinit(); errdefer test_data.fork_choice.latest_new_aggregated_payloads.deinit(); @@ -2512,7 +2622,6 @@ const RebaseTestContext = struct { self.fork_choice.attestations.deinit(); self.fork_choice.deltas.deinit(self.allocator); self.fork_choice.gossip_signatures.deinit(); - self.fork_choice.attestation_data_by_root.deinit(); // Deinit each list in latest_known_aggregated_payloads var it_known = self.fork_choice.latest_known_aggregated_payloads.iterator(); while (it_known.next()) |entry| { @@ -3433,7 +3542,6 @@ test "rebase: heavy attestation load - all validators tracked correctly" { .logger = module_logger, .mutex = Thread.RwLock{}, .gossip_signatures = 
SignaturesMap.init(allocator), - .attestation_data_by_root = std.AutoHashMap(types.Root, types.AttestationData).init(allocator), .latest_new_aggregated_payloads = AggregatedPayloadsMap.init(allocator), .latest_known_aggregated_payloads = AggregatedPayloadsMap.init(allocator), .signatures_mutex = std.Thread.Mutex{}, @@ -3444,7 +3552,6 @@ test "rebase: heavy attestation load - all validators tracked correctly" { defer fork_choice.attestations.deinit(); defer fork_choice.deltas.deinit(fork_choice.allocator); defer fork_choice.gossip_signatures.deinit(); - defer fork_choice.attestation_data_by_root.deinit(); defer deinitAggregatedPayloadsMap(allocator, &fork_choice.latest_known_aggregated_payloads); defer deinitAggregatedPayloadsMap(allocator, &fork_choice.latest_new_aggregated_payloads); diff --git a/pkgs/spectest/src/runner/fork_choice_runner.zig b/pkgs/spectest/src/runner/fork_choice_runner.zig index 590910d5e..8fbed4b10 100644 --- a/pkgs/spectest/src/runner/fork_choice_runner.zig +++ b/pkgs/spectest/src/runner/fork_choice_runner.zig @@ -781,7 +781,7 @@ fn processBlockStep( validator_ids[i] = @intCast(vi); } - ctx.fork_choice.storeAggregatedPayload(validator_ids, &aggregated_attestation.data, proof_template, true) catch |err| { + ctx.fork_choice.storeAggregatedPayload(&aggregated_attestation.data, proof_template, true) catch |err| { std.debug.print( "fixture {s} case {s}{f}: failed to store aggregated payload ({s})\n", .{ fixture_path, case_name, formatStep(step_index), @errorName(err) }, @@ -819,9 +819,6 @@ fn processBlockStep( // Proposer attestation is treated as gossip and queued as a new aggregated payload. 
try ctx.fork_choice.onSignedAttestation(signed_attestation); - const proposer_data_root = try proposer_attestation.data.sszRoot(ctx.allocator); - try ctx.fork_choice.attestation_data_by_root.put(proposer_data_root, proposer_attestation.data); - var proposer_proof = types.AggregatedSignatureProof.init(ctx.allocator) catch |err| { std.debug.print( "fixture {s} case {s}{f}: failed to init proposer proof ({s})\n", @@ -839,11 +836,7 @@ fn processBlockStep( return FixtureError.InvalidFixture; }; - const sig_key = types.SignatureKey{ - .validator_id = proposer_attestation.validator_id, - .data_root = proposer_data_root, - }; - const gop = try ctx.fork_choice.latest_new_aggregated_payloads.getOrPut(sig_key); + const gop = try ctx.fork_choice.latest_new_aggregated_payloads.getOrPut(proposer_attestation.data); if (!gop.found_existing) { gop.value_ptr.* = .empty; } diff --git a/pkgs/state-transition/src/mock.zig b/pkgs/state-transition/src/mock.zig index 7d72e7b35..10b10fab7 100644 --- a/pkgs/state-transition/src/mock.zig +++ b/pkgs/state-transition/src/mock.zig @@ -6,7 +6,6 @@ const params = @import("@zeam/params"); const types = @import("@zeam/types"); const zeam_utils = @import("@zeam/utils"); const keymanager = @import("@zeam/key-manager"); -const xmss = @import("@zeam/xmss"); const transition = @import("./transition.zig"); @@ -277,7 +276,7 @@ pub fn genMockChain(allocator: Allocator, numBlocks: usize, from_genesis: ?types else => unreachable, } - // Build gossip signatures map from attestations + // Build gossip signatures map from attestations (keyed by AttestationData) var signatures_map = types.SignaturesMap.init(allocator); defer signatures_map.deinit(); @@ -285,37 +284,46 @@ pub fn genMockChain(allocator: Allocator, numBlocks: usize, from_genesis: ?types // Get the serialized signature bytes const sig_buffer = try key_manager.signAttestation(&attestation, allocator); - // Compute data root for the signature key - const data_root = try 
attestation.data.sszRoot(allocator); - - try signatures_map.put( - .{ .validator_id = attestation.validator_id, .data_root = data_root }, - .{ .slot = attestation.data.slot, .signature = sig_buffer }, - ); + try signatures_map.addSignature(attestation.data, attestation.validator_id, .{ + .slot = attestation.data.slot, + .signature = sig_buffer, + }); } - // Compute aggregated signatures using the shared method - var aggregation = try types.AggregatedAttestationsResult.init(allocator); + // Compute aggregated signatures directly from signatures map + var agg_attestations = try types.AggregatedAttestations.init(allocator); var agg_att_cleanup = true; - var agg_sig_cleanup = true; errdefer if (agg_att_cleanup) { - for (aggregation.attestations.slice()) |*att| { - att.deinit(); - } - aggregation.attestations.deinit(); + for (agg_attestations.slice()) |*att| att.deinit(); + agg_attestations.deinit(); }; + + var agg_signatures = try types.AttestationSignatures.init(allocator); + var agg_sig_cleanup = true; errdefer if (agg_sig_cleanup) { - for (aggregation.attestation_signatures.slice()) |*sig| { - sig.deinit(); - } - aggregation.attestation_signatures.deinit(); + for (agg_signatures.slice()) |*sig| sig.deinit(); + agg_signatures.deinit(); }; - try aggregation.computeAggregatedSignatures( - attestations.items, - &beam_state.validators, - &signatures_map, - null, // no pre-aggregated payloads in mock - ); + + var sig_it = signatures_map.iterator(); + while (sig_it.next()) |entry| { + const att_data = entry.key_ptr.*; + + var proof = try types.aggregateInnerMap(allocator, entry.value_ptr, att_data, &beam_state.validators); + errdefer proof.deinit(); + + // Clone participants for the attestation entry + var att_bits = try types.AggregationBits.init(allocator); + errdefer att_bits.deinit(); + for (0..proof.participants.len()) |i| { + if (proof.participants.get(i) catch false) { + try types.aggregationBitsSet(&att_bits, i, true); + } + } + + try agg_attestations.append(.{ 
.aggregation_bits = att_bits, .data = att_data }); + try agg_signatures.append(proof); + } const proposer_index = slot % genesis_config.numValidators(); var block = types.BeamBlock{ @@ -324,7 +332,7 @@ pub fn genMockChain(allocator: Allocator, numBlocks: usize, from_genesis: ?types .parent_root = parent_root, .state_root = state_root, .body = types.BeamBlockBody{ - .attestations = aggregation.attestations, + .attestations = agg_attestations, }, }; agg_att_cleanup = false; @@ -360,7 +368,7 @@ pub fn genMockChain(allocator: Allocator, numBlocks: usize, from_genesis: ?types ); const block_signatures = types.BlockSignatures{ - .attestation_signatures = aggregation.attestation_signatures, + .attestation_signatures = agg_signatures, .proposer_signature = proposer_sig, }; agg_sig_cleanup = false; diff --git a/pkgs/types/src/block.zig b/pkgs/types/src/block.zig index a29da5b42..6cd0250b0 100644 --- a/pkgs/types/src/block.zig +++ b/pkgs/types/src/block.zig @@ -25,7 +25,6 @@ const SIGSIZE = utils.SIGSIZE; const Root = utils.Root; const ZERO_HASH = utils.ZERO_HASH; const ZERO_SIGBYTES = utils.ZERO_SIGBYTES; -const Validators = validator.Validators; const bytesToHex = utils.BytesToHex; const json = std.json; @@ -33,20 +32,176 @@ const json = std.json; const freeJsonValue = utils.freeJsonValue; // signatures_map types for aggregation -/// SignatureKey is used to index signatures by (validator_id, data_root). -pub const SignatureKey = struct { - validator_id: ValidatorIndex, - data_root: Root, -}; -/// Stored signatures_map entry +/// Stored signatures_map entry: per-validator signature + slot metadata. pub const StoredSignature = struct { slot: Slot, signature: SIGBYTES, }; -/// Map type for signatures_map: SignatureKey -> individual XMSS signature bytes + slot metadata -pub const SignaturesMap = std.AutoHashMap(SignatureKey, StoredSignature); +/// Map type for gossip signatures: AttestationData -> per-validator signatures. 
+/// Wraps AutoHashMap to manage the lifecycle of inner maps and provide +/// convenience helpers for common operations. +pub const SignaturesMap = struct { + pub const InnerMap = std.AutoHashMap(ValidatorIndex, StoredSignature); + + const InnerHashMap = std.AutoHashMap(attestation.AttestationData, InnerMap); + + inner: InnerHashMap, + allocator: std.mem.Allocator, + + pub fn init(allocator: std.mem.Allocator) SignaturesMap { + return .{ + .inner = InnerHashMap.init(allocator), + .allocator = allocator, + }; + } + + /// Deinit all inner maps, then the outer map itself. + pub fn deinit(self: *SignaturesMap) void { + var it = self.inner.iterator(); + while (it.next()) |entry| entry.value_ptr.deinit(); + self.inner.deinit(); + } + + /// Look up (or create) the inner map for `att_data` and insert the signature. + pub fn addSignature( + self: *SignaturesMap, + att_data: attestation.AttestationData, + validator_id: utils.ValidatorIndex, + sig: StoredSignature, + ) !void { + const gop = try self.inner.getOrPut(att_data); + if (!gop.found_existing) { + gop.value_ptr.* = InnerMap.init(self.allocator); + } + try gop.value_ptr.put(validator_id, sig); + } + + pub fn getOrPut(self: *SignaturesMap, key: attestation.AttestationData) !InnerHashMap.GetOrPutResult { + return self.inner.getOrPut(key); + } + + pub fn getPtr(self: *const SignaturesMap, key: attestation.AttestationData) ?*InnerMap { + return self.inner.getPtr(key); + } + + pub fn get(self: *const SignaturesMap, key: attestation.AttestationData) ?InnerMap { + return self.inner.get(key); + } + + pub fn fetchRemove(self: *SignaturesMap, key: attestation.AttestationData) ?InnerHashMap.KV { + return self.inner.fetchRemove(key); + } + + /// Remove an entry and deinit its inner map. No-op if key not present. 
+    pub fn removeAndDeinit(self: *SignaturesMap, key: attestation.AttestationData) void {
+        if (self.inner.fetchRemove(key)) |kv| {
+            var inner = kv.value;
+            inner.deinit();
+        }
+    }
+
+    pub fn put(self: *SignaturesMap, key: attestation.AttestationData, value: InnerMap) !void {
+        return self.inner.put(key, value);
+    }
+
+    pub fn iterator(self: *const SignaturesMap) InnerHashMap.Iterator {
+        return self.inner.iterator();
+    }
+
+    pub fn count(self: *const SignaturesMap) InnerHashMap.Size {
+        return self.inner.count();
+    }
+};
+
+/// Aggregate all individual gossip signatures in an inner map into a single proof.
+/// The caller owns the returned proof and must call `deinit` on it.
+pub fn aggregateInnerMap(
+    allocator: Allocator,
+    inner_map: *const SignaturesMap.InnerMap,
+    att_data: attestation.AttestationData,
+    validators: *const validator.Validators,
+) !aggregation.AggregatedSignatureProof {
+    var message_hash: [32]u8 = undefined;
+    try zeam_utils.hashTreeRoot(attestation.AttestationData, att_data, &message_hash, allocator);
+
+    var sigs: std.ArrayList(xmss.Signature) = .empty;
+    defer {
+        for (sigs.items) |*sig| sig.deinit();
+        sigs.deinit(allocator);
+    }
+
+    var pks: std.ArrayList(xmss.PublicKey) = .empty;
+    defer {
+        for (pks.items) |*pk| pk.deinit();
+        pks.deinit(allocator);
+    }
+
+    var participants = try attestation.AggregationBits.init(allocator);
+    var participants_cleanup = true;
+    errdefer if (participants_cleanup) participants.deinit();
+
+    const ValidatorEntry = struct {
+        validator_id: utils.ValidatorIndex,
+        stored_sig: *const StoredSignature,
+    };
+    var validator_entries: std.ArrayList(ValidatorEntry) = .empty;
+    defer validator_entries.deinit(allocator);
+
+    var it = inner_map.iterator();
+    while (it.next()) |entry| {
+        try validator_entries.append(allocator, .{
+            .validator_id = entry.key_ptr.*,
+            .stored_sig = entry.value_ptr,
+        });
+    }
+
+    std.mem.sort(ValidatorEntry, validator_entries.items, {}, struct {
+        fn lessThan(_: void, a: ValidatorEntry, 
b: ValidatorEntry) bool { + return a.validator_id < b.validator_id; + } + }.lessThan); + + for (validator_entries.items) |ve| { + const validator_idx: usize = @intCast(ve.validator_id); + + var sig = try xmss.Signature.fromBytes(ve.stored_sig.signature[0..]); + errdefer sig.deinit(); + + const val = try validators.get(ve.validator_id); + var pk = try xmss.PublicKey.fromBytes(&val.pubkey); + errdefer pk.deinit(); + + try attestation.aggregationBitsSet(&participants, validator_idx, true); + try sigs.append(allocator, sig); + try pks.append(allocator, pk); + } + + const num_sigs = sigs.items.len; + const sig_handles = try allocator.alloc(*const xmss.HashSigSignature, num_sigs); + defer allocator.free(sig_handles); + const pk_handles = try allocator.alloc(*const xmss.HashSigPublicKey, num_sigs); + defer allocator.free(pk_handles); + + for (sigs.items, 0..) |*sig, i| sig_handles[i] = sig.handle; + for (pks.items, 0..) |*pk, i| pk_handles[i] = pk.handle; + + var proof = try aggregation.AggregatedSignatureProof.init(allocator); + errdefer proof.deinit(); + + try aggregation.AggregatedSignatureProof.aggregate( + participants, + pk_handles, + sig_handles, + &message_hash, + @intCast(att_data.slot), + &proof, + ); + participants_cleanup = false; + + return proof; +} /// Stored aggregated payload entry pub const StoredAggregatedPayload = struct { @@ -57,8 +212,8 @@ pub const StoredAggregatedPayload = struct { /// List of aggregated payloads for a single key pub const AggregatedPayloadsList = std.ArrayList(StoredAggregatedPayload); -/// Map type for aggregated payloads: SignatureKey -> list of AggregatedSignatureProof -pub const AggregatedPayloadsMap = std.AutoHashMap(SignatureKey, AggregatedPayloadsList); +/// Map type for aggregated payloads: AttestationData -> list of AggregatedSignatureProof. 
+pub const AggregatedPayloadsMap = std.AutoHashMap(attestation.AttestationData, AggregatedPayloadsList); // Types pub const BeamBlockBody = struct { @@ -303,338 +458,6 @@ pub fn createBlockSignatures(allocator: Allocator, num_aggregated_attestations: }; } -pub const AggregatedAttestationsResult = struct { - attestations: AggregatedAttestations, - attestation_signatures: AttestationSignatures, - allocator: Allocator, - - const Self = @This(); - - pub fn init(allocator: Allocator) !Self { - var attestations_list = try AggregatedAttestations.init(allocator); - errdefer attestations_list.deinit(); - - var signatures_list = try AttestationSignatures.init(allocator); - errdefer signatures_list.deinit(); - - return .{ - .attestations = attestations_list, - .attestation_signatures = signatures_list, - .allocator = allocator, - }; - } - - /// Compute aggregated signatures using three-phase algorithm: - /// Phase 1: Collect individual signatures from signatures_map (chain: gossip_signatures) - /// Phase 2: Fallback to aggregated_payloads using greedy set-cover (if provided) - /// Phase 3: Remove signatures which are already coverd by stored prrofs and aggregate remaining signatures - pub fn computeAggregatedSignatures( - self: *Self, - attestations_list: []const Attestation, - validators: *const Validators, - signatures_map: *const SignaturesMap, - aggregated_payloads: ?*const AggregatedPayloadsMap, - ) !void { - const allocator = self.allocator; - - // Group attestations by data root using bitsets for validator tracking - const AttestationGroup = struct { - data: attestation.AttestationData, - data_root: Root, - validator_bits: std.DynamicBitSet, - }; - - var groups: std.ArrayList(AttestationGroup) = .empty; - defer { - for (groups.items) |*group| { - group.validator_bits.deinit(); - } - groups.deinit(allocator); - } - - var root_indices = std.AutoHashMap(Root, usize).init(allocator); - defer root_indices.deinit(); - - // Group attestations by data root - for 
(attestations_list) |att| { - const data_root = try att.data.sszRoot(allocator); - const vid: usize = @intCast(att.validator_id); - if (root_indices.get(data_root)) |group_index| { - var bits = &groups.items[group_index].validator_bits; - if (vid >= bits.capacity()) { - try bits.resize(vid + 1, false); - } - bits.set(vid); - } else { - var new_bits = try std.DynamicBitSet.initEmpty(allocator, vid + 1); - new_bits.set(vid); - try groups.append(allocator, .{ - .data = att.data, - .data_root = data_root, - .validator_bits = new_bits, - }); - try root_indices.put(data_root, groups.items.len - 1); - } - } - - // Process each group - for (groups.items) |*group| { - const data_root = group.data_root; - const epoch: u64 = group.data.slot; - var message_hash: [32]u8 = undefined; - try zeam_utils.hashTreeRoot(attestation.AttestationData, group.data, &message_hash, allocator); - - // Phase 1: Collect signatures from signatures_map - const max_validator = group.validator_bits.capacity(); - - var sigmap_sigs: std.ArrayList(xmss.Signature) = .empty; - defer { - for (sigmap_sigs.items) |*sig| { - sig.deinit(); - } - sigmap_sigs.deinit(allocator); - } - - var sigmap_pks: std.ArrayList(xmss.PublicKey) = .empty; - defer { - for (sigmap_pks.items) |*pk| { - pk.deinit(); - } - sigmap_pks.deinit(allocator); - } - - // Map from validator_id to index in signatures_map arrays - // Used to remove signatures from sigmap_sigs while aggregating which are already covered by stored proofs - var vid_to_sigmap_idx = try allocator.alloc(?usize, max_validator); - defer allocator.free(vid_to_sigmap_idx); - @memset(vid_to_sigmap_idx, null); - - // Bitsets for tracking validator states - var remaining = try std.DynamicBitSet.initEmpty(allocator, max_validator); - defer remaining.deinit(); - - var sigmap_available = try std.DynamicBitSet.initEmpty(allocator, max_validator); - defer sigmap_available.deinit(); - - // Track validators covered by stored proofs (to avoid redundancy with signatures_map) - 
var covered_by_stored = try std.DynamicBitSet.initEmpty(allocator, max_validator); - defer covered_by_stored.deinit(); - - // Attempt to collect each validator's signature from signatures_map - var validator_it = group.validator_bits.iterator(.{}); - while (validator_it.next()) |validator_id| { - const vid: ValidatorIndex = @intCast(validator_id); - if (signatures_map.get(.{ .validator_id = vid, .data_root = data_root })) |sig_entry| { - // Check if it's not a zero signature - if (!std.mem.eql(u8, &sig_entry.signature, &ZERO_SIGBYTES)) { - // Deserialize signature - var sig = xmss.Signature.fromBytes(&sig_entry.signature) catch { - remaining.set(validator_id); - continue; - }; - errdefer sig.deinit(); - - // Get public key from validator - if (validator_id >= validators.len()) { - sig.deinit(); - remaining.set(validator_id); - continue; - } - - const val = validators.get(validator_id) catch { - sig.deinit(); - remaining.set(validator_id); - continue; - }; - const pk = xmss.PublicKey.fromBytes(&val.pubkey) catch { - sig.deinit(); - remaining.set(validator_id); - continue; - }; - - vid_to_sigmap_idx[validator_id] = sigmap_sigs.items.len; - try sigmap_sigs.append(allocator, sig); - try sigmap_pks.append(allocator, pk); - sigmap_available.set(validator_id); - } else { - remaining.set(validator_id); - } - } else { - remaining.set(validator_id); - } - } - - // Phase 2: Fallback to aggregated_payloads using greedy set-cover - if (aggregated_payloads) |agg_payloads| { - // Temporary bitset for computing coverage - var proof_bits = try std.DynamicBitSet.initEmpty(allocator, max_validator); - defer proof_bits.deinit(); - - while (remaining.count() > 0) { - // Pick any remaining validator to look up proofs - const target_id = remaining.findFirstSet() orelse break; - const vid: ValidatorIndex = @intCast(target_id); - - // Remove the target_id from remaining if not covered by stored proofs - const candidates = agg_payloads.get(.{ .validator_id = vid, .data_root = data_root }) 
orelse { - remaining.unset(target_id); - continue; - }; - - if (candidates.items.len == 0) { - remaining.unset(target_id); - continue; - } - - // Find the proof covering the most remaining validators (greedy set-cover) - var best_proof: ?*const aggregation.AggregatedSignatureProof = null; - var max_coverage: usize = 0; - - for (candidates.items) |*stored| { - const proof = &stored.proof; - const max_participants = proof.participants.len(); - - // Reset and populate proof_bits from participants - proof_bits.setRangeValue(.{ .start = 0, .end = proof_bits.capacity() }, false); - if (max_participants > proof_bits.capacity()) { - try proof_bits.resize(max_participants, false); - } - - var coverage: usize = 0; - - for (0..max_participants) |i| { - if (proof.participants.get(i) catch false) { - // Count coverage of validators still in remaining (not yet covered by stored proofs) - if (i < remaining.capacity() and remaining.isSet(i)) { - proof_bits.set(i); - coverage += 1; - } - } - } - - if (coverage == 0) { - continue; - } - - if (coverage > max_coverage) { - max_coverage = coverage; - best_proof = proof; - } - } - - if (best_proof == null or max_coverage == 0) { - remaining.unset(target_id); - continue; - } - - // Clone and add the proof - var cloned_proof: aggregation.AggregatedSignatureProof = undefined; - try utils.sszClone(allocator, aggregation.AggregatedSignatureProof, best_proof.?.*, &cloned_proof); - errdefer cloned_proof.deinit(); - - // Create aggregated attestation matching the proof's participants - // and update tracking bitsets in a single pass - var att_bits = try attestation.AggregationBits.init(allocator); - errdefer att_bits.deinit(); - - for (0..cloned_proof.participants.len()) |i| { - if (cloned_proof.participants.get(i) catch false) { - try attestation.aggregationBitsSet(&att_bits, i, true); - if (i < remaining.capacity()) { - remaining.unset(i); - } - // Track ALL validators covered by stored proofs to remove from signatures_map later - if (i >= 
covered_by_stored.capacity()) { - try covered_by_stored.resize(i + 1, false); - } - covered_by_stored.set(i); - } - } - - try self.attestations.append(.{ .aggregation_bits = att_bits, .data = group.data }); - try self.attestation_signatures.append(cloned_proof); - } - } - - // Finally, aggregate signatures_map for validators NOT covered by stored proofs - // This avoids redundancy: if a validator is in a stored proof, don't include them in signatures_map aggregation - var usable_count: usize = 0; - var git = sigmap_available.iterator(.{}); - while (git.next()) |vid| { - if (vid >= covered_by_stored.capacity() or !covered_by_stored.isSet(vid)) { - usable_count += 1; - } - } - - if (usable_count > 0) { - var participants = try attestation.AggregationBits.init(allocator); - var participants_cleanup = true; - errdefer if (participants_cleanup) participants.deinit(); - - var pk_handles = try allocator.alloc(*const xmss.HashSigPublicKey, usable_count); - defer allocator.free(pk_handles); - var sig_handles = try allocator.alloc(*const xmss.HashSigSignature, usable_count); - defer allocator.free(sig_handles); - - // Iterate sigmap_available in order, skipping validators already in stored proofs - var handle_idx: usize = 0; - var git2 = sigmap_available.iterator(.{}); - while (git2.next()) |vid| { - // Skip if already covered by a stored proof - if (vid < covered_by_stored.capacity() and covered_by_stored.isSet(vid)) continue; - - try attestation.aggregationBitsSet(&participants, vid, true); - const sigmap_idx = vid_to_sigmap_idx[vid].?; - pk_handles[handle_idx] = sigmap_pks.items[sigmap_idx].handle; - sig_handles[handle_idx] = sigmap_sigs.items[sigmap_idx].handle; - handle_idx += 1; - } - - var proof = try aggregation.AggregatedSignatureProof.init(allocator); - errdefer proof.deinit(); - - try aggregation.AggregatedSignatureProof.aggregate( - participants, - pk_handles[0..handle_idx], - sig_handles[0..handle_idx], - &message_hash, - epoch, - &proof, - ); - 
participants_cleanup = false; // proof now owns participants buffer - - // Create aggregated attestation using proof's participants (which now owns the bits) - // We need to clone it since we're moving it into the attestation - var att_bits = try attestation.AggregationBits.init(allocator); - errdefer att_bits.deinit(); - - // Clone from proof.participants - const proof_participants_len = proof.participants.len(); - for (0..proof_participants_len) |i| { - if (proof.participants.get(i) catch false) { - try attestation.aggregationBitsSet(&att_bits, i, true); - } - } - - try self.attestations.append(.{ .aggregation_bits = att_bits, .data = group.data }); - try self.attestation_signatures.append(proof); - } - } - } - - pub fn deinit(self: *Self) void { - for (self.attestations.slice()) |*att| { - att.deinit(); - } - self.attestations.deinit(); - - for (self.attestation_signatures.slice()) |*sig_group| { - sig_group.deinit(); - } - self.attestation_signatures.deinit(); - } -}; - pub const BlockByRootRequest = struct { roots: ssz.utils.List(utils.Root, params.MAX_REQUEST_BLOCKS), diff --git a/pkgs/types/src/block_signatures_testing.zig b/pkgs/types/src/block_signatures_testing.zig deleted file mode 100644 index 7cb4758fb..000000000 --- a/pkgs/types/src/block_signatures_testing.zig +++ /dev/null @@ -1,1118 +0,0 @@ -const std = @import("std"); -const ssz = @import("ssz"); - -const params = @import("@zeam/params"); -const xmss = @import("@zeam/xmss"); -const zeam_utils = @import("@zeam/utils"); - -const aggregation = @import("./aggregation.zig"); -const attestation = @import("./attestation.zig"); -const mini_3sf = @import("./mini_3sf.zig"); -const state = @import("./state.zig"); -const utils = @import("./utils.zig"); -const validator = @import("./validator.zig"); - -const block = @import("./block.zig"); -const Allocator = std.mem.Allocator; -const SignaturesMap = block.SignaturesMap; -const AggregatedPayloadsMap = block.AggregatedPayloadsMap; -const ValidatorIndex = 
utils.ValidatorIndex; -const Root = utils.Root; -const ZERO_HASH = utils.ZERO_HASH; - -const SignatureKey = block.SignatureKey; -const AggregatedAttestationsResult = block.AggregatedAttestationsResult; -const AggregatedPayloadsList = block.AggregatedPayloadsList; - -// ============================================================================ -// Test helpers for computeAggregatedSignatures -// ============================================================================ - -const keymanager = @import("@zeam/key-manager"); - -const TestContext = struct { - allocator: std.mem.Allocator, - key_manager: keymanager.KeyManager, - validators: validator.Validators, - data_root: Root, - attestation_data: attestation.AttestationData, - - pub fn init(allocator: std.mem.Allocator, num_validators: usize) !TestContext { - var key_manager = try keymanager.getTestKeyManager(allocator, num_validators, 10); - errdefer key_manager.deinit(); - - // Create validators with proper pubkeys - var validators_list = try validator.Validators.init(allocator); - errdefer validators_list.deinit(); - - for (0..num_validators) |i| { - var pubkey: utils.Bytes52 = undefined; - _ = try key_manager.getPublicKeyBytes(@intCast(i), &pubkey); - try validators_list.append(.{ - .pubkey = pubkey, - .index = @intCast(i), - }); - } - - // Create common attestation data - const att_data = attestation.AttestationData{ - .slot = 5, - .head = .{ .root = [_]u8{1} ** 32, .slot = 5 }, - .target = .{ .root = [_]u8{1} ** 32, .slot = 5 }, - .source = .{ .root = ZERO_HASH, .slot = 0 }, - }; - - const data_root = try att_data.sszRoot(allocator); - - return TestContext{ - .allocator = allocator, - .key_manager = key_manager, - .validators = validators_list, - .data_root = data_root, - .attestation_data = att_data, - }; - } - - pub fn deinit(self: *TestContext) void { - self.validators.deinit(); - self.key_manager.deinit(); - } - - /// Create an attestation for a given validator - pub fn createAttestation(self: *const 
TestContext, validator_id: ValidatorIndex) attestation.Attestation { - return attestation.Attestation{ - .validator_id = validator_id, - .data = self.attestation_data, - }; - } - - /// Create attestation with custom data (for different groups) - pub fn createAttestationWithData(self: *const TestContext, validator_id: ValidatorIndex, data: attestation.AttestationData) attestation.Attestation { - _ = self; - return attestation.Attestation{ - .validator_id = validator_id, - .data = data, - }; - } - - /// Sign an attestation and add to signatures map - pub fn addToSignatureMap( - self: *TestContext, - signatures_map: *SignaturesMap, - validator_id: ValidatorIndex, - ) !void { - const att = self.createAttestation(validator_id); - const sig_bytes = try self.key_manager.signAttestation(&att, self.allocator); - try signatures_map.put( - .{ .validator_id = validator_id, .data_root = self.data_root }, - .{ .slot = self.attestation_data.slot, .signature = sig_bytes }, - ); - } - - /// Create an aggregated proof covering specified validators - pub fn createAggregatedProof( - self: *TestContext, - validator_ids: []const ValidatorIndex, - ) !aggregation.AggregatedSignatureProof { - // Create attestations and collect signatures - var sigs = std.ArrayList(xmss.Signature).init(self.allocator); - defer { - for (sigs.items) |*sig| sig.deinit(); - sigs.deinit(); - } - - var pks = std.ArrayList(xmss.PublicKey).init(self.allocator); - defer { - for (pks.items) |*pk| pk.deinit(); - pks.deinit(); - } - - for (validator_ids) |vid| { - const att = self.createAttestation(vid); - const sig_bytes = try self.key_manager.signAttestation(&att, self.allocator); - var sig = try xmss.Signature.fromBytes(&sig_bytes); - errdefer sig.deinit(); - - const val = try self.validators.get(@intCast(vid)); - var pk = try xmss.PublicKey.fromBytes(&val.pubkey); - errdefer pk.deinit(); - - try sigs.append(sig); - try pks.append(pk); - } - - // Build handle arrays - var pk_handles = try self.allocator.alloc(*const 
xmss.HashSigPublicKey, pks.items.len); - defer self.allocator.free(pk_handles); - var sig_handles = try self.allocator.alloc(*const xmss.HashSigSignature, sigs.items.len); - defer self.allocator.free(sig_handles); - - for (pks.items, 0..) |*pk, i| { - pk_handles[i] = pk.handle; - } - for (sigs.items, 0..) |*sig, i| { - sig_handles[i] = sig.handle; - } - - // Build participants bitset - var participants = try attestation.AggregationBits.init(self.allocator); - errdefer participants.deinit(); - for (validator_ids) |vid| { - try attestation.aggregationBitsSet(&participants, @intCast(vid), true); - } - - // Compute message hash - var message_hash: [32]u8 = undefined; - try zeam_utils.hashTreeRoot(attestation.AttestationData, self.attestation_data, &message_hash, self.allocator); - - // Aggregate - var proof = try aggregation.AggregatedSignatureProof.init(self.allocator); - errdefer proof.deinit(); - - try aggregation.AggregatedSignatureProof.aggregate( - participants, - pk_handles, - sig_handles, - &message_hash, - self.attestation_data.slot, - &proof, - ); - - return proof; - } - - /// Add an aggregated proof to the payloads map for a specific validator - pub fn addAggregatedPayload( - self: *TestContext, - payloads_map: *AggregatedPayloadsMap, - lookup_validator_id: ValidatorIndex, - proof: aggregation.AggregatedSignatureProof, - ) !void { - const key = SignatureKey{ .validator_id = lookup_validator_id, .data_root = self.data_root }; - const gop = try payloads_map.getOrPut(key); - if (!gop.found_existing) { - gop.value_ptr.* = AggregatedPayloadsList.init(self.allocator); - } - try gop.value_ptr.append(.{ - .slot = self.attestation_data.slot, - .proof = proof, - }); - } - - /// Helper to check if a bitset contains exactly the specified validators - pub fn checkParticipants(bits: *const attestation.AggregationBits, expected_validators: []const ValidatorIndex) !bool { - var count: usize = 0; - for (0..bits.len()) |i| { - if (try bits.get(i)) { - count += 1; - var found 
= false; - for (expected_validators) |vid| { - if (i == vid) { - found = true; - break; - } - } - if (!found) return false; - } - } - return count == expected_validators.len; - } -}; - -fn deinitSignaturesMap(map: *SignaturesMap) void { - map.deinit(); -} - -fn deinitPayloadsMap(map: *AggregatedPayloadsMap) void { - var it = map.valueIterator(); - while (it.next()) |list| { - for (list.items) |*item| { - item.proof.deinit(); - } - list.deinit(); - } - map.deinit(); -} - -// ============================================================================ -// Test 1: All 4 signatures in signatures_map (pure signatures_map) -// ============================================================================ -test "computeAggregatedSignatures: all 4 in signatures_map" { - const allocator = std.testing.allocator; - - var ctx = try TestContext.init(allocator, 4); - defer ctx.deinit(); - - // Create attestations for all 4 validators - var attestations_list = [_]attestation.Attestation{ - ctx.createAttestation(0), - ctx.createAttestation(1), - ctx.createAttestation(2), - ctx.createAttestation(3), - }; - - // Add all 4 signatures to signatures_map - var signatures_map = SignaturesMap.init(allocator); - defer deinitSignaturesMap(&signatures_map); - - try ctx.addToSignatureMap(&signatures_map, 0); - try ctx.addToSignatureMap(&signatures_map, 1); - try ctx.addToSignatureMap(&signatures_map, 2); - try ctx.addToSignatureMap(&signatures_map, 3); - - // No aggregated payloads - var payloads_map = AggregatedPayloadsMap.init(allocator); - defer deinitPayloadsMap(&payloads_map); - - // Create aggregation context and compute - var agg_ctx = try AggregatedAttestationsResult.init(allocator); - defer agg_ctx.deinit(); - - try agg_ctx.computeAggregatedSignatures( - &attestations_list, - &ctx.validators, - &signatures_map, - &payloads_map, - ); - - // Should have exactly 1 aggregated attestation covering all 4 validators - try std.testing.expectEqual(@as(usize, 1), agg_ctx.attestations.len()); - 
try std.testing.expectEqual(@as(usize, 1), agg_ctx.attestation_signatures.len()); - - const att_bits = &(try agg_ctx.attestations.get(0)).aggregation_bits; - try std.testing.expect(try TestContext.checkParticipants(att_bits, &[_]ValidatorIndex{ 0, 1, 2, 3 })); -} - -// ============================================================================ -// Test 2: 2 in signatures_map, 2 in aggregated_proof (clean split) -// ============================================================================ -test "computeAggregatedSignatures: 2 signatures_map, 2 in aggregated proof" { - const allocator = std.testing.allocator; - - var ctx = try TestContext.init(allocator, 4); - defer ctx.deinit(); - - // Create attestations for all 4 validators - var attestations_list = [_]attestation.Attestation{ - ctx.createAttestation(0), - ctx.createAttestation(1), - ctx.createAttestation(2), - ctx.createAttestation(3), - }; - - // Add signatures for validators 0, 1 only - var signatures_map = SignaturesMap.init(allocator); - defer deinitSignaturesMap(&signatures_map); - - try ctx.addToSignatureMap(&signatures_map, 0); - try ctx.addToSignatureMap(&signatures_map, 1); - - // Create aggregated proof for validators 2, 3 - var payloads_map = AggregatedPayloadsMap.init(allocator); - defer deinitPayloadsMap(&payloads_map); - - const proof_2_3 = try ctx.createAggregatedProof(&[_]ValidatorIndex{ 2, 3 }); - // Add to both validator 2 and 3's lookup - try ctx.addAggregatedPayload(&payloads_map, 2, proof_2_3); - - // Create aggregation context and compute - var agg_ctx = try AggregatedAttestationsResult.init(allocator); - defer agg_ctx.deinit(); - - try agg_ctx.computeAggregatedSignatures( - &attestations_list, - &ctx.validators, - &signatures_map, - &payloads_map, - ); - - // Should have exactly 2 aggregated attestations - try std.testing.expectEqual(@as(usize, 2), agg_ctx.attestations.len()); - try std.testing.expectEqual(@as(usize, 2), agg_ctx.attestation_signatures.len()); - - // Verify one covers 
2,3 and one covers 0,1 - var found_0_1 = false; - var found_2_3 = false; - - for (0..agg_ctx.attestations.len()) |i| { - const att_bits = &(try agg_ctx.attestations.get(i)).aggregation_bits; - if (try TestContext.checkParticipants(att_bits, &[_]ValidatorIndex{ 0, 1 })) { - found_0_1 = true; - } - if (try TestContext.checkParticipants(att_bits, &[_]ValidatorIndex{ 2, 3 })) { - found_2_3 = true; - } - } - - try std.testing.expect(found_0_1); - try std.testing.expect(found_2_3); -} - -// ============================================================================ -// Test 3: 2 in signatures_map, all 4 in aggregated_proof (full overlap - no redundancy) -// When stored proof covers ALL validators, signatures_map aggregation is skipped -// ============================================================================ -test "computeAggregatedSignatures: full overlap uses stored only" { - const allocator = std.testing.allocator; - - var ctx = try TestContext.init(allocator, 4); - defer ctx.deinit(); - - // Create attestations for all 4 validators - var attestations_list = [_]attestation.Attestation{ - ctx.createAttestation(0), - ctx.createAttestation(1), - ctx.createAttestation(2), - ctx.createAttestation(3), - }; - - // Add signatures for validators 0, 1 only - var signatures_map = SignaturesMap.init(allocator); - defer deinitSignaturesMap(&signatures_map); - - try ctx.addToSignatureMap(&signatures_map, 0); - try ctx.addToSignatureMap(&signatures_map, 1); - - // Create aggregated proof for ALL 4 validators (fully covers 0,1) - var payloads_map = AggregatedPayloadsMap.init(allocator); - defer deinitPayloadsMap(&payloads_map); - - const proof_all = try ctx.createAggregatedProof(&[_]ValidatorIndex{ 0, 1, 2, 3 }); - try ctx.addAggregatedPayload(&payloads_map, 2, proof_all); - - // Create aggregation context and compute - var agg_ctx = try AggregatedAttestationsResult.init(allocator); - defer agg_ctx.deinit(); - - try agg_ctx.computeAggregatedSignatures( - &attestations_list, - 
&ctx.validators, - &signatures_map, - &payloads_map, - ); - - // Should have only 1 aggregated attestation: - // - Stored proof covering {0,1,2,3} - // - signatures_map {0,1} is NOT included because all validators are covered by stored proof - try std.testing.expectEqual(@as(usize, 1), agg_ctx.attestations.len()); - try std.testing.expectEqual(@as(usize, 1), agg_ctx.attestation_signatures.len()); - - const att_bits = &(try agg_ctx.attestations.get(0)).aggregation_bits; - try std.testing.expect(try TestContext.checkParticipants(att_bits, &[_]ValidatorIndex{ 0, 1, 2, 3 })); -} - -// ============================================================================ -// Test 4: Greedy set-cover with competing proofs -// ============================================================================ -test "computeAggregatedSignatures: greedy set-cover" { - const allocator = std.testing.allocator; - - var ctx = try TestContext.init(allocator, 4); - defer ctx.deinit(); - - // Create attestations for all 4 validators - var attestations_list = [_]attestation.Attestation{ - ctx.createAttestation(0), - ctx.createAttestation(1), - ctx.createAttestation(2), - ctx.createAttestation(3), - }; - - // Add signature only for validator 0 - var signatures_map = SignaturesMap.init(allocator); - defer deinitSignaturesMap(&signatures_map); - - try ctx.addToSignatureMap(&signatures_map, 0); - - // Create competing aggregated proofs: - // Proof A: covers 1,2,3 (optimal) - // Proof B: covers 1,2 (suboptimal) - // Proof C: covers 2,3 (suboptimal) - var payloads_map = AggregatedPayloadsMap.init(allocator); - defer deinitPayloadsMap(&payloads_map); - - const proof_a = try ctx.createAggregatedProof(&[_]ValidatorIndex{ 1, 2, 3 }); - const proof_b = try ctx.createAggregatedProof(&[_]ValidatorIndex{ 1, 2 }); - - // Add proof A and B for validator 1 lookup - try ctx.addAggregatedPayload(&payloads_map, 1, proof_a); - try ctx.addAggregatedPayload(&payloads_map, 1, proof_b); - - // Create aggregation context 
and compute - var agg_ctx = try AggregatedAttestationsResult.init(allocator); - defer agg_ctx.deinit(); - - try agg_ctx.computeAggregatedSignatures( - &attestations_list, - &ctx.validators, - &signatures_map, - &payloads_map, - ); - - // Should have exactly 2 aggregated attestations: - // 1. signatures_map for validator 0 - // 2. Aggregated proof A for validators 1,2,3 - try std.testing.expectEqual(@as(usize, 2), agg_ctx.attestations.len()); - try std.testing.expectEqual(@as(usize, 2), agg_ctx.attestation_signatures.len()); - - // Verify one covers 0 and one covers 1,2,3 - var found_0 = false; - var found_1_2_3 = false; - - for (0..agg_ctx.attestations.len()) |i| { - const att_bits = &(try agg_ctx.attestations.get(i)).aggregation_bits; - if (try TestContext.checkParticipants(att_bits, &[_]ValidatorIndex{0})) { - found_0 = true; - } - if (try TestContext.checkParticipants(att_bits, &[_]ValidatorIndex{ 1, 2, 3 })) { - found_1_2_3 = true; - } - } - - try std.testing.expect(found_0); - try std.testing.expect(found_1_2_3); -} - -// ============================================================================ -// Test 5: Partial signatures_map overlap with stored proof (maximize coverage) -// signatures_map {1,2} + Stored {2,3,4} = Both included for maximum coverage {1,2,3,4} -// ============================================================================ -test "computeAggregatedSignatures: partial signatures_map overlap maximizes coverage" { - const allocator = std.testing.allocator; - - var ctx = try TestContext.init(allocator, 5); - defer ctx.deinit(); - - // Create attestations for validators 1,2,3,4 - var attestations_list = [_]attestation.Attestation{ - ctx.createAttestation(1), - ctx.createAttestation(2), - ctx.createAttestation(3), - ctx.createAttestation(4), - }; - - // Add signatures_map for validators 1, 2 only - var signatures_map = SignaturesMap.init(allocator); - defer deinitSignaturesMap(&signatures_map); - - try ctx.addToSignatureMap(&signatures_map, 1); - 
try ctx.addToSignatureMap(&signatures_map, 2); - - // Create aggregated proof for validators 2, 3, 4 (overlaps with signatures_map on 2) - var payloads_map = AggregatedPayloadsMap.init(allocator); - defer deinitPayloadsMap(&payloads_map); - - const proof_2_3_4 = try ctx.createAggregatedProof(&[_]ValidatorIndex{ 2, 3, 4 }); - try ctx.addAggregatedPayload(&payloads_map, 3, proof_2_3_4); - - // Create aggregation context and compute - var agg_ctx = try AggregatedAttestationsResult.init(allocator); - defer agg_ctx.deinit(); - - try agg_ctx.computeAggregatedSignatures( - &attestations_list, - &ctx.validators, - &signatures_map, - &payloads_map, - ); - - // Should have 2 aggregated attestations: - // 1. Stored proof covering {2,3,4} - // 2. signatures_map aggregation covering {1} only (validator 2 excluded - already in stored proof) - // Together they cover {1,2,3,4} without redundancy - try std.testing.expectEqual(@as(usize, 2), agg_ctx.attestations.len()); - try std.testing.expectEqual(@as(usize, 2), agg_ctx.attestation_signatures.len()); - - // Verify both aggregations exist - var found_1 = false; - var found_2_3_4 = false; - - for (0..agg_ctx.attestations.len()) |i| { - const att_bits = &(try agg_ctx.attestations.get(i)).aggregation_bits; - if (try TestContext.checkParticipants(att_bits, &[_]ValidatorIndex{1})) { - found_1 = true; - } - if (try TestContext.checkParticipants(att_bits, &[_]ValidatorIndex{ 2, 3, 4 })) { - found_2_3_4 = true; - } - } - - try std.testing.expect(found_1); - try std.testing.expect(found_2_3_4); -} - -// ============================================================================ -// Test 6: Empty attestations list -// ============================================================================ -test "computeAggregatedSignatures: empty attestations" { - const allocator = std.testing.allocator; - - var ctx = try TestContext.init(allocator, 4); - defer ctx.deinit(); - - var attestations_list = [_]attestation.Attestation{}; - - var 
signatures_map = SignaturesMap.init(allocator); - defer deinitSignaturesMap(&signatures_map); - - var payloads_map = AggregatedPayloadsMap.init(allocator); - defer deinitPayloadsMap(&payloads_map); - - var agg_ctx = try AggregatedAttestationsResult.init(allocator); - defer agg_ctx.deinit(); - - try agg_ctx.computeAggregatedSignatures( - &attestations_list, - &ctx.validators, - &signatures_map, - &payloads_map, - ); - - // Should have no attestations - try std.testing.expectEqual(@as(usize, 0), agg_ctx.attestations.len()); - try std.testing.expectEqual(@as(usize, 0), agg_ctx.attestation_signatures.len()); -} - -// ============================================================================ -// Test 7: No signatures available -// ============================================================================ -test "computeAggregatedSignatures: no signatures available" { - const allocator = std.testing.allocator; - - var ctx = try TestContext.init(allocator, 4); - defer ctx.deinit(); - - // Create attestations for all 4 validators - var attestations_list = [_]attestation.Attestation{ - ctx.createAttestation(0), - ctx.createAttestation(1), - ctx.createAttestation(2), - ctx.createAttestation(3), - }; - - // No signatures_map signatures - var signatures_map = SignaturesMap.init(allocator); - defer deinitSignaturesMap(&signatures_map); - - // No aggregated payloads - var payloads_map = AggregatedPayloadsMap.init(allocator); - defer deinitPayloadsMap(&payloads_map); - - var agg_ctx = try AggregatedAttestationsResult.init(allocator); - defer agg_ctx.deinit(); - - try agg_ctx.computeAggregatedSignatures( - &attestations_list, - &ctx.validators, - &signatures_map, - &payloads_map, - ); - - // Should have no attestations (all validators uncovered) - try std.testing.expectEqual(@as(usize, 0), agg_ctx.attestations.len()); - try std.testing.expectEqual(@as(usize, 0), agg_ctx.attestation_signatures.len()); -} - -// 
============================================================================ -// Test 8: Multiple data roots (separate groups) -// ============================================================================ -test "computeAggregatedSignatures: multiple data roots" { - const allocator = std.testing.allocator; - - var ctx = try TestContext.init(allocator, 4); - defer ctx.deinit(); - - // Create second attestation data with different slot - const att_data_2 = attestation.AttestationData{ - .slot = 10, - .head = .{ .root = [_]u8{2} ** 32, .slot = 10 }, - .target = .{ .root = [_]u8{2} ** 32, .slot = 10 }, - .source = .{ .root = ZERO_HASH, .slot = 0 }, - }; - const data_root_2 = try att_data_2.sszRoot(allocator); - - // Create attestations: 0,1 with data_root_1, 2,3 with data_root_2 - var attestations_list = [_]attestation.Attestation{ - ctx.createAttestation(0), // data_root_1 - ctx.createAttestation(1), // data_root_1 - ctx.createAttestationWithData(2, att_data_2), // data_root_2 - ctx.createAttestationWithData(3, att_data_2), // data_root_2 - }; - - // Add signatures_map signatures for all - var signatures_map = SignaturesMap.init(allocator); - defer deinitSignaturesMap(&signatures_map); - - // Signatures for group 1 (data_root_1) - try ctx.addToSignatureMap(&signatures_map, 0); - try ctx.addToSignatureMap(&signatures_map, 1); - - // Signatures for group 2 (data_root_2) - need to sign with different data - const att_2 = attestations_list[2]; - const sig_bytes_2 = try ctx.key_manager.signAttestation(&att_2, allocator); - try signatures_map.put( - .{ .validator_id = 2, .data_root = data_root_2 }, - .{ .slot = att_data_2.slot, .signature = sig_bytes_2 }, - ); - - const att_3 = attestations_list[3]; - const sig_bytes_3 = try ctx.key_manager.signAttestation(&att_3, allocator); - try signatures_map.put( - .{ .validator_id = 3, .data_root = data_root_2 }, - .{ .slot = att_data_2.slot, .signature = sig_bytes_3 }, - ); - - var payloads_map = 
AggregatedPayloadsMap.init(allocator); - defer deinitPayloadsMap(&payloads_map); - - var agg_ctx = try AggregatedAttestationsResult.init(allocator); - defer agg_ctx.deinit(); - - try agg_ctx.computeAggregatedSignatures( - &attestations_list, - &ctx.validators, - &signatures_map, - &payloads_map, - ); - - // Should have exactly 2 aggregated attestations (one per data root) - try std.testing.expectEqual(@as(usize, 2), agg_ctx.attestations.len()); - try std.testing.expectEqual(@as(usize, 2), agg_ctx.attestation_signatures.len()); - - // Verify one covers 0,1 and one covers 2,3 - var found_0_1 = false; - var found_2_3 = false; - - for (0..agg_ctx.attestations.len()) |i| { - const att_bits = &(try agg_ctx.attestations.get(i)).aggregation_bits; - if (try TestContext.checkParticipants(att_bits, &[_]ValidatorIndex{ 0, 1 })) { - found_0_1 = true; - } - if (try TestContext.checkParticipants(att_bits, &[_]ValidatorIndex{ 2, 3 })) { - found_2_3 = true; - } - } - - try std.testing.expect(found_0_1); - try std.testing.expect(found_2_3); -} - -// ============================================================================ -// Test 9: Single validator attestation -// ============================================================================ -test "computeAggregatedSignatures: single validator" { - const allocator = std.testing.allocator; - - var ctx = try TestContext.init(allocator, 1); - defer ctx.deinit(); - - // Create attestation for single validator - var attestations_list = [_]attestation.Attestation{ - ctx.createAttestation(0), - }; - - // Add signatures_map signature - var signatures_map = SignaturesMap.init(allocator); - defer deinitSignaturesMap(&signatures_map); - - try ctx.addToSignatureMap(&signatures_map, 0); - - var payloads_map = AggregatedPayloadsMap.init(allocator); - defer deinitPayloadsMap(&payloads_map); - - var agg_ctx = try AggregatedAttestationsResult.init(allocator); - defer agg_ctx.deinit(); - - try agg_ctx.computeAggregatedSignatures( - 
&attestations_list, - &ctx.validators, - &signatures_map, - &payloads_map, - ); - - // Should have exactly 1 aggregated attestation with 1 validator - try std.testing.expectEqual(@as(usize, 1), agg_ctx.attestations.len()); - try std.testing.expectEqual(@as(usize, 1), agg_ctx.attestation_signatures.len()); - - const att_bits = &(try agg_ctx.attestations.get(0)).aggregation_bits; - try std.testing.expect(try TestContext.checkParticipants(att_bits, &[_]ValidatorIndex{0})); -} - -// ============================================================================ -// Test 10: Complex scenario with 3 attestation_data types -// - Group 1: All validators have signatures_map signatures (pure signatures_map) -// - Group 2: All validators covered by aggregated_payload only (pure stored) -// - Group 3: Overlap - some signatures_map + stored proof covering some signatures_map validators -// ============================================================================ -test "computeAggregatedSignatures: complex 3 groups" { - const allocator = std.testing.allocator; - - // Need 10 validators for this test - var ctx = try TestContext.init(allocator, 10); - defer ctx.deinit(); - - // Create 3 different attestation data types - const att_data_1 = ctx.attestation_data; // slot 5 (uses ctx.data_root for signatures_map) - - const att_data_2 = attestation.AttestationData{ - .slot = 10, - .head = .{ .root = [_]u8{2} ** 32, .slot = 10 }, - .target = .{ .root = [_]u8{2} ** 32, .slot = 10 }, - .source = .{ .root = ZERO_HASH, .slot = 0 }, - }; - const data_root_2 = try att_data_2.sszRoot(allocator); - - const att_data_3 = attestation.AttestationData{ - .slot = 15, - .head = .{ .root = [_]u8{3} ** 32, .slot = 15 }, - .target = .{ .root = [_]u8{3} ** 32, .slot = 15 }, - .source = .{ .root = ZERO_HASH, .slot = 0 }, - }; - const data_root_3 = try att_data_3.sszRoot(allocator); - - // Create attestations for all groups: - // Group 1 (data_root_1): validators 0,1,2 - pure signatures_map - // Group 2 
(data_root_2): validators 3,4,5 - pure stored - // Group 3 (data_root_3): validators 6,7,8,9 - overlap (signatures_map 6,7 + stored 7,8,9) - var attestations_list = [_]attestation.Attestation{ - // Group 1 - ctx.createAttestationWithData(0, att_data_1), - ctx.createAttestationWithData(1, att_data_1), - ctx.createAttestationWithData(2, att_data_1), - // Group 2 - ctx.createAttestationWithData(3, att_data_2), - ctx.createAttestationWithData(4, att_data_2), - ctx.createAttestationWithData(5, att_data_2), - // Group 3 - ctx.createAttestationWithData(6, att_data_3), - ctx.createAttestationWithData(7, att_data_3), - ctx.createAttestationWithData(8, att_data_3), - ctx.createAttestationWithData(9, att_data_3), - }; - - var signatures_map = SignaturesMap.init(allocator); - defer deinitSignaturesMap(&signatures_map); - - // Group 1: Add signatures_map signatures for validators 0,1,2 - try ctx.addToSignatureMap(&signatures_map, 0); - try ctx.addToSignatureMap(&signatures_map, 1); - try ctx.addToSignatureMap(&signatures_map, 2); - - // Group 2: No signatures_map signatures (all from stored) - - // Group 3: Add signatures_map signatures for validators 6,7 only - const att_6 = attestations_list[6]; - const sig_bytes_6 = try ctx.key_manager.signAttestation(&att_6, allocator); - try signatures_map.put( - .{ .validator_id = 6, .data_root = data_root_3 }, - .{ .slot = att_data_3.slot, .signature = sig_bytes_6 }, - ); - - const att_7 = attestations_list[7]; - const sig_bytes_7 = try ctx.key_manager.signAttestation(&att_7, allocator); - try signatures_map.put( - .{ .validator_id = 7, .data_root = data_root_3 }, - .{ .slot = att_data_3.slot, .signature = sig_bytes_7 }, - ); - - var payloads_map = AggregatedPayloadsMap.init(allocator); - defer deinitPayloadsMap(&payloads_map); - - // Group 2: Create aggregated proof for validators 3,4,5 - { - // Need to create proof with att_data_2 - var sigs = std.ArrayList(xmss.Signature).init(allocator); - defer { - for (sigs.items) |*sig| 
sig.deinit(); - sigs.deinit(); - } - var pks = std.ArrayList(xmss.PublicKey).init(allocator); - defer { - for (pks.items) |*pk| pk.deinit(); - pks.deinit(); - } - - for ([_]ValidatorIndex{ 3, 4, 5 }) |vid| { - const att = attestations_list[vid]; - const sig_bytes = try ctx.key_manager.signAttestation(&att, allocator); - var sig = try xmss.Signature.fromBytes(&sig_bytes); - errdefer sig.deinit(); - const val = try ctx.validators.get(@intCast(vid)); - var pk = try xmss.PublicKey.fromBytes(&val.pubkey); - errdefer pk.deinit(); - try sigs.append(sig); - try pks.append(pk); - } - - var pk_handles = try allocator.alloc(*const xmss.HashSigPublicKey, 3); - defer allocator.free(pk_handles); - var sig_handles = try allocator.alloc(*const xmss.HashSigSignature, 3); - defer allocator.free(sig_handles); - - for (pks.items, 0..) |*pk, i| pk_handles[i] = pk.handle; - for (sigs.items, 0..) |*sig, i| sig_handles[i] = sig.handle; - - var participants = try attestation.AggregationBits.init(allocator); - errdefer participants.deinit(); - for ([_]ValidatorIndex{ 3, 4, 5 }) |vid| { - try attestation.aggregationBitsSet(&participants, @intCast(vid), true); - } - - var message_hash: [32]u8 = undefined; - try zeam_utils.hashTreeRoot(attestation.AttestationData, att_data_2, &message_hash, allocator); - - var proof = try aggregation.AggregatedSignatureProof.init(allocator); - errdefer proof.deinit(); - - try aggregation.AggregatedSignatureProof.aggregate( - participants, - pk_handles, - sig_handles, - &message_hash, - att_data_2.slot, - &proof, - ); - - // Add to payloads_map for validator 3 - const key = SignatureKey{ .validator_id = 3, .data_root = data_root_2 }; - const gop = try payloads_map.getOrPut(key); - if (!gop.found_existing) { - gop.value_ptr.* = AggregatedPayloadsList.init(allocator); - } - try gop.value_ptr.append(.{ .slot = att_data_2.slot, .proof = proof }); - } - - // Group 3: Create aggregated proof for validators 7,8,9 (overlaps with signatures_map on 7) - { - var sigs = 
std.ArrayList(xmss.Signature).init(allocator); - defer { - for (sigs.items) |*sig| sig.deinit(); - sigs.deinit(); - } - var pks = std.ArrayList(xmss.PublicKey).init(allocator); - defer { - for (pks.items) |*pk| pk.deinit(); - pks.deinit(); - } - - for ([_]ValidatorIndex{ 7, 8, 9 }) |vid| { - const att = attestations_list[vid]; - const sig_bytes = try ctx.key_manager.signAttestation(&att, allocator); - var sig = try xmss.Signature.fromBytes(&sig_bytes); - errdefer sig.deinit(); - const val = try ctx.validators.get(@intCast(vid)); - var pk = try xmss.PublicKey.fromBytes(&val.pubkey); - errdefer pk.deinit(); - try sigs.append(sig); - try pks.append(pk); - } - - var pk_handles = try allocator.alloc(*const xmss.HashSigPublicKey, 3); - defer allocator.free(pk_handles); - var sig_handles = try allocator.alloc(*const xmss.HashSigSignature, 3); - defer allocator.free(sig_handles); - - for (pks.items, 0..) |*pk, i| pk_handles[i] = pk.handle; - for (sigs.items, 0..) |*sig, i| sig_handles[i] = sig.handle; - - var participants = try attestation.AggregationBits.init(allocator); - errdefer participants.deinit(); - for ([_]ValidatorIndex{ 7, 8, 9 }) |vid| { - try attestation.aggregationBitsSet(&participants, @intCast(vid), true); - } - - var message_hash: [32]u8 = undefined; - try zeam_utils.hashTreeRoot(attestation.AttestationData, att_data_3, &message_hash, allocator); - - var proof = try aggregation.AggregatedSignatureProof.init(allocator); - errdefer proof.deinit(); - - try aggregation.AggregatedSignatureProof.aggregate( - participants, - pk_handles, - sig_handles, - &message_hash, - att_data_3.slot, - &proof, - ); - - // Add to payloads_map for validator 8 (one of the remaining signatures_map validators) - const key = SignatureKey{ .validator_id = 8, .data_root = data_root_3 }; - const gop = try payloads_map.getOrPut(key); - if (!gop.found_existing) { - gop.value_ptr.* = AggregatedPayloadsList.init(allocator); - } - try gop.value_ptr.append(.{ .slot = att_data_3.slot, .proof 
= proof }); - } - - // Execute - var agg_ctx = try AggregatedAttestationsResult.init(allocator); - defer agg_ctx.deinit(); - - try agg_ctx.computeAggregatedSignatures( - &attestations_list, - &ctx.validators, - &signatures_map, - &payloads_map, - ); - - // Expected results: - // - Group 1: 1 attestation from signatures_map {0,1,2} - // - Group 2: 1 attestation from stored {3,4,5} - // - Group 3: 2 attestations - stored {7,8,9} + signatures_map {6} (7 excluded from signatures_map) - // Total: 4 attestations - try std.testing.expectEqual(@as(usize, 4), agg_ctx.attestations.len()); - try std.testing.expectEqual(@as(usize, 4), agg_ctx.attestation_signatures.len()); - - // Verify each group - var found_0_1_2 = false; - var found_3_4_5 = false; - var found_7_8_9 = false; - var found_6 = false; - - for (0..agg_ctx.attestations.len()) |i| { - const att_bits = &(try agg_ctx.attestations.get(i)).aggregation_bits; - if (try TestContext.checkParticipants(att_bits, &[_]ValidatorIndex{ 0, 1, 2 })) { - found_0_1_2 = true; - } - if (try TestContext.checkParticipants(att_bits, &[_]ValidatorIndex{ 3, 4, 5 })) { - found_3_4_5 = true; - } - if (try TestContext.checkParticipants(att_bits, &[_]ValidatorIndex{ 7, 8, 9 })) { - found_7_8_9 = true; - } - if (try TestContext.checkParticipants(att_bits, &[_]ValidatorIndex{6})) { - found_6 = true; - } - } - - try std.testing.expect(found_0_1_2); // Group 1: pure signatures_map - try std.testing.expect(found_3_4_5); // Group 2: pure stored - try std.testing.expect(found_7_8_9); // Group 3: stored proof - try std.testing.expect(found_6); // Group 3: remaining signatures_map (7 excluded) -} - -// ============================================================================ -// Test 11: Validator without signature is excluded -// signatures_map {1} + aggregated_payload {2,3} = attestations {1} + {2,3}, validator 4 excluded -// ============================================================================ -test "computeAggregatedSignatures: validator 
without signature excluded" { - const allocator = std.testing.allocator; - - var ctx = try TestContext.init(allocator, 5); - defer ctx.deinit(); - - // Create attestations for validators 1, 2, 3, 4 - var attestations_list = [_]attestation.Attestation{ - ctx.createAttestation(1), - ctx.createAttestation(2), - ctx.createAttestation(3), - ctx.createAttestation(4), - }; - - // Add signature only for validator 1 to signatures_map - var signatures_map = SignaturesMap.init(allocator); - defer deinitSignaturesMap(&signatures_map); - - try ctx.addToSignatureMap(&signatures_map, 1); - - // Create aggregated proof for validators 2, 3 only - var payloads_map = AggregatedPayloadsMap.init(allocator); - defer deinitPayloadsMap(&payloads_map); - - const proof_2_3 = try ctx.createAggregatedProof(&[_]ValidatorIndex{ 2, 3 }); - try ctx.addAggregatedPayload(&payloads_map, 2, proof_2_3); - - // Create aggregation context and compute - var agg_ctx = try AggregatedAttestationsResult.init(allocator); - defer agg_ctx.deinit(); - - try agg_ctx.computeAggregatedSignatures( - &attestations_list, - &ctx.validators, - &signatures_map, - &payloads_map, - ); - - // Should have exactly 2 aggregated attestations: - // 1. signatures_map for validator 1 - // 2. 
Aggregated proof for validators 2, 3 - // Validator 4 should be excluded (no signature available) - try std.testing.expectEqual(@as(usize, 2), agg_ctx.attestations.len()); - try std.testing.expectEqual(@as(usize, 2), agg_ctx.attestation_signatures.len()); - - // Verify one covers {1} and one covers {2, 3} - var found_1 = false; - var found_2_3 = false; - - for (0..agg_ctx.attestations.len()) |i| { - const att_bits = &(try agg_ctx.attestations.get(i)).aggregation_bits; - - // Check for validator 1 only - if (try TestContext.checkParticipants(att_bits, &[_]ValidatorIndex{1})) { - found_1 = true; - } - // Check for validators 2, 3 - if (try TestContext.checkParticipants(att_bits, &[_]ValidatorIndex{ 2, 3 })) { - found_2_3 = true; - } - - // Verify validator 4 is NOT included in any attestation - // If the bitlist has fewer than 5 elements, validator 4 can't be included - if (att_bits.len() > 4) { - try std.testing.expect(!(try att_bits.get(4))); - } - } - - try std.testing.expect(found_1); - try std.testing.expect(found_2_3); -} - -// ============================================================================ -// Test 12: Single attestation lookup key with all validators in aggregated payload -// Attestations for validators 1,2 nothing in signatures_map, -// aggregated_payload {1,2,3,4} indexed by validator 1 => all bits set -// Validators 3 and 4 are included although not covered by attestations_list -// ============================================================================ -test "computeAggregatedSignatures: empty signatures_map with full aggregated payload" { - const allocator = std.testing.allocator; - - var ctx = try TestContext.init(allocator, 5); - defer ctx.deinit(); - - // Create attestations for validators 1, 2 - var attestations_list = [_]attestation.Attestation{ - ctx.createAttestation(1), - ctx.createAttestation(2), - }; - - // Empty signatures_map - nothing found while iterating - var signatures_map = SignaturesMap.init(allocator); - defer 
deinitSignaturesMap(&signatures_map); - - // Create aggregated proof for validators 1, 2, 3, 4 indexed by validator 1 - var payloads_map = AggregatedPayloadsMap.init(allocator); - defer deinitPayloadsMap(&payloads_map); - - const proof_1_2_3_4 = try ctx.createAggregatedProof(&[_]ValidatorIndex{ 1, 2, 3, 4 }); - try ctx.addAggregatedPayload(&payloads_map, 1, proof_1_2_3_4); - - // Create aggregation context and compute - var agg_ctx = try AggregatedAttestationsResult.init(allocator); - defer agg_ctx.deinit(); - - try agg_ctx.computeAggregatedSignatures( - &attestations_list, - &ctx.validators, - &signatures_map, - &payloads_map, - ); - - // Should have exactly 1 aggregated attestation covering all 4 validators - try std.testing.expectEqual(@as(usize, 1), agg_ctx.attestations.len()); - try std.testing.expectEqual(@as(usize, 1), agg_ctx.attestation_signatures.len()); - - // Verify attestation_bits are set for validators 1, 2, 3, 4 - const att_bits = &(try agg_ctx.attestations.get(0)).aggregation_bits; - try std.testing.expect(try TestContext.checkParticipants(att_bits, &[_]ValidatorIndex{ 1, 2, 3, 4 })); -} diff --git a/pkgs/types/src/lib.zig b/pkgs/types/src/lib.zig index b0b084af5..40f8fc4f5 100644 --- a/pkgs/types/src/lib.zig +++ b/pkgs/types/src/lib.zig @@ -22,11 +22,10 @@ pub const BeamBlockBody = block.BeamBlockBody; pub const BlockWithAttestation = block.BlockWithAttestation; pub const SignedBlockWithAttestation = block.SignedBlockWithAttestation; pub const AggregatedAttestations = block.AggregatedAttestations; -pub const AggregatedAttestationsResult = block.AggregatedAttestationsResult; pub const AttestationSignatures = block.AttestationSignatures; pub const BlockSignatures = block.BlockSignatures; pub const createBlockSignatures = block.createBlockSignatures; -pub const SignatureKey = block.SignatureKey; +pub const aggregateInnerMap = block.AggregateInnerMap; pub const StoredSignature = block.StoredSignature; pub const SignaturesMap = block.SignaturesMap; pub 
const StoredAggregatedPayload = block.StoredAggregatedPayload; From 6c633d6e51a8615e597f7085484575c32cf17d23 Mon Sep 17 00:00:00 2001 From: Chen Kai <281165273grape@gmail.com> Date: Wed, 1 Apr 2026 21:51:18 +0800 Subject: [PATCH 24/24] feat: use xev Dynamic API for runtime io_uring/epoll detection MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace compile-time backend override (forked libxev) with upstream xev's Dynamic API. On Linux, Dynamic probes io_uring first and falls back to epoll at runtime — exactly what Shadow needs since it doesn't support io_uring_setup. On macOS, Dynamic degenerates to static kqueue. - Switch all `@import("xev")` to `@import("xev").Dynamic` - Add `detectBackend()` helper with `@hasDecl` guard for single-backend platforms (macOS) where `detect()` doesn't exist - Revert build.zig.zon to upstream libxev (no fork needed) - Remove build.zig XevBackend option --- build.zig | 2 ++ pkgs/cli/src/main.zig | 5 ++++- pkgs/cli/src/node.zig | 2 +- pkgs/network/src/ethlibp2p.zig | 2 +- pkgs/network/src/interface.zig | 2 +- pkgs/network/src/mock.zig | 4 +++- pkgs/node/src/clock.zig | 2 +- pkgs/node/src/lib.zig | 1 + pkgs/node/src/node.zig | 2 +- pkgs/node/src/testing.zig | 5 ++++- pkgs/node/src/utils.zig | 11 ++++++++++- 11 files changed, 29 insertions(+), 9 deletions(-) diff --git a/build.zig b/build.zig index b1e71d1f4..744a73620 100644 --- a/build.zig +++ b/build.zig @@ -86,6 +86,8 @@ pub fn build(b: *Builder) !void { // LTO option (disabled by default for faster builds) const enable_lto = b.option(bool, "lto", "Enable Link Time Optimization (slower builds, smaller binaries)") orelse false; + // xev backend override is no longer needed — Dynamic API handles runtime detection. 
+ // add ssz const ssz = b.dependency("ssz", .{ .target = target, diff --git a/pkgs/cli/src/main.zig b/pkgs/cli/src/main.zig index caf9478f4..4e8cd524e 100644 --- a/pkgs/cli/src/main.zig +++ b/pkgs/cli/src/main.zig @@ -19,7 +19,7 @@ const node_lib = @import("@zeam/node"); const Clock = node_lib.Clock; const state_proving_manager = @import("@zeam/state-proving-manager"); const BeamNode = node_lib.BeamNode; -const xev = @import("xev"); +const xev = @import("xev").Dynamic; const Multiaddr = @import("multiaddr").Multiaddr; const configs = @import("@zeam/configs"); @@ -281,6 +281,9 @@ fn mainInner() !void { std.debug.print("opts={any} genesis={d}\n", .{ opts.args, genesis }); + // Detect the best available I/O backend (io_uring or epoll on Linux). + node_lib.detectBackend(); + switch (opts.args.__commands__) { .clock => { var loop = xev.Loop.init(.{}) catch |err| { diff --git a/pkgs/cli/src/node.zig b/pkgs/cli/src/node.zig index 594ad253b..97ce9c2f4 100644 --- a/pkgs/cli/src/node.zig +++ b/pkgs/cli/src/node.zig @@ -12,7 +12,7 @@ const ChainConfig = configs.ChainConfig; const Chain = configs.Chain; const ChainOptions = configs.ChainOptions; const sft = @import("@zeam/state-transition"); -const xev = @import("xev"); +const xev = @import("xev").Dynamic; const networks = @import("@zeam/network"); const Multiaddr = @import("multiaddr").Multiaddr; const node_lib = @import("@zeam/node"); diff --git a/pkgs/network/src/ethlibp2p.zig b/pkgs/network/src/ethlibp2p.zig index 9e8343e79..8fcdee569 100644 --- a/pkgs/network/src/ethlibp2p.zig +++ b/pkgs/network/src/ethlibp2p.zig @@ -4,7 +4,7 @@ const Thread = std.Thread; const ssz = @import("ssz"); const types = @import("@zeam/types"); -const xev = @import("xev"); +const xev = @import("xev").Dynamic; const multiformats = @import("multiformats"); const multiaddr_mod = @import("multiaddr"); const Multiaddr = multiaddr_mod.Multiaddr; diff --git a/pkgs/network/src/interface.zig b/pkgs/network/src/interface.zig index d4cb1a6be..44f5b4e93 
100644 --- a/pkgs/network/src/interface.zig +++ b/pkgs/network/src/interface.zig @@ -4,7 +4,7 @@ const json = std.json; const types = @import("@zeam/types"); const ssz = @import("ssz"); -const xev = @import("xev"); +const xev = @import("xev").Dynamic; const zeam_utils = @import("@zeam/utils"); const consensus_params = @import("@zeam/params"); diff --git a/pkgs/network/src/mock.zig b/pkgs/network/src/mock.zig index bf776d5fa..ee8797797 100644 --- a/pkgs/network/src/mock.zig +++ b/pkgs/network/src/mock.zig @@ -2,7 +2,7 @@ const std = @import("std"); const Allocator = std.mem.Allocator; const types = @import("@zeam/types"); -const xev = @import("xev"); +const xev = @import("xev").Dynamic; const zeam_utils = @import("@zeam/utils"); const interface = @import("./interface.zig"); @@ -774,6 +774,7 @@ test "Mock messaging across two subscribers" { defer arena_allocator.deinit(); const allocator = arena_allocator.allocator(); + if (@hasDecl(xev, "detect")) xev.detect() catch @panic("no available xev backend"); var loop = try xev.Loop.init(.{}); defer loop.deinit(); @@ -954,6 +955,7 @@ test "Mock status RPC between peers" { defer arena_allocator.deinit(); const allocator = arena_allocator.allocator(); + if (@hasDecl(xev, "detect")) xev.detect() catch @panic("no available xev backend"); var loop = try xev.Loop.init(.{}); defer loop.deinit(); diff --git a/pkgs/node/src/clock.zig b/pkgs/node/src/clock.zig index 33a2b2578..d3b43e5ae 100644 --- a/pkgs/node/src/clock.zig +++ b/pkgs/node/src/clock.zig @@ -1,7 +1,7 @@ const std = @import("std"); const Allocator = std.mem.Allocator; -const xev = @import("xev"); +const xev = @import("xev").Dynamic; const constants = @import("./constants.zig"); diff --git a/pkgs/node/src/lib.zig b/pkgs/node/src/lib.zig index e546f1189..9879398da 100644 --- a/pkgs/node/src/lib.zig +++ b/pkgs/node/src/lib.zig @@ -11,6 +11,7 @@ pub const fcFactory = @import("./forkchoice.zig"); pub const tree_visualizer = @import("./tree_visualizer.zig"); pub const 
constants = @import("./constants.zig"); pub const utils = @import("./utils.zig"); +pub const detectBackend = utils.detectBackend; const networks = @import("@zeam/network"); pub const NodeNameRegistry = networks.NodeNameRegistry; diff --git a/pkgs/node/src/node.zig b/pkgs/node/src/node.zig index 97799aba3..d012a5cdf 100644 --- a/pkgs/node/src/node.zig +++ b/pkgs/node/src/node.zig @@ -1413,7 +1413,7 @@ pub const BeamNode = struct { } }; -const xev = @import("xev"); +const xev = @import("xev").Dynamic; test "Node peer tracking on connect/disconnect" { var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator); diff --git a/pkgs/node/src/testing.zig b/pkgs/node/src/testing.zig index 46449ed4f..daa2fc918 100644 --- a/pkgs/node/src/testing.zig +++ b/pkgs/node/src/testing.zig @@ -7,7 +7,7 @@ const key_manager = @import("@zeam/key-manager"); const params = @import("@zeam/params"); const types = @import("@zeam/types"); const zeam_utils = @import("@zeam/utils"); -const xev = @import("xev"); +const xev = @import("xev").Dynamic; const networks = @import("@zeam/network"); const xmss = @import("@zeam/xmss"); const clockFactory = @import("./clock.zig"); @@ -47,6 +47,9 @@ pub const NodeTestContext = struct { spec_name_owned: bool = true, pub fn init(allocator: Allocator, opts: NodeTestOptions) !NodeTestContext { + const utils = @import("./utils.zig"); + utils.detectBackend(); + var loop = try xev.Loop.init(.{}); errdefer loop.deinit(); diff --git a/pkgs/node/src/utils.zig b/pkgs/node/src/utils.zig index 862de052d..ceaac3dd2 100644 --- a/pkgs/node/src/utils.zig +++ b/pkgs/node/src/utils.zig @@ -2,9 +2,18 @@ const std = @import("std"); const Thread = std.Thread; const Mutex = Thread.Mutex; -const xev = @import("xev"); +const xev = @import("xev").Dynamic; const types = @import("@zeam/types"); +/// Detect the best available I/O backend at runtime. +/// On Linux this probes io_uring, falling back to epoll (needed for Shadow). 
+/// On single-backend platforms (macOS/kqueue) this is a no-op. +pub fn detectBackend() void { + if (@hasDecl(xev, "detect")) { + xev.detect() catch @panic("no available xev backend"); + } +} + pub const EventLoop = struct { loop: *xev.Loop, // events from libp2p or other threads will also be pushed on it