diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index fabb93ea4fd..943f36c810d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -283,7 +283,6 @@ jobs: needs: - cargo-verifications - publish-crates-check - - build-docker-images - cargo-test-kms runs-on: ubuntu-latest steps: @@ -342,8 +341,8 @@ jobs: matrix: arch: [ # build on native runners instead of using emulation - {platform: linux/amd64, runner: buildjet-8vcpu-ubuntu-2204}, - {platform: linux/arm64, runner: buildjet-16vcpu-ubuntu-2204-arm} + { platform: linux/amd64, runner: buildjet-8vcpu-ubuntu-2204 }, + { platform: linux/arm64, runner: buildjet-16vcpu-ubuntu-2204-arm } ] runs-on: ${{ matrix.arch.runner }} permissions: diff --git a/CHANGELOG.md b/CHANGELOG.md index e37dbef4585..fa34c9f3c1b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,7 +9,27 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ### Added - [2327](https://github.com/FuelLabs/fuel-core/pull/2327): Add more services tests and more checks of the pool. Also add high-level documentation for users of the pool and contributors. -======= +## [Version 0.40.0] + +### Added +- [2347](https://github.com/FuelLabs/fuel-core/pull/2347): Add GraphQL complexity histogram to metrics. +- [2350](https://github.com/FuelLabs/fuel-core/pull/2350): Added a new CLI flag `graphql-number-of-threads` to limit the number of threads used by the GraphQL service. The default value is `2`; `0` enables the old behavior. +- [2335](https://github.com/FuelLabs/fuel-core/pull/2335): Added CLI arguments for configuring GraphQL query costs. + +### Fixed +- [2345](https://github.com/FuelLabs/fuel-core/pull/2345): In PoA, increase the priority of the block creation timer trigger compared to txpool event management. + +### Changed +- [2334](https://github.com/FuelLabs/fuel-core/pull/2334): Prepare the GraphQL service for switching to `async` methods. +- [2310](https://github.com/FuelLabs/fuel-core/pull/2310): New metrics: "The gas prices used in a block" (`importer_gas_price_for_block`), "The total gas used in a block" (`importer_gas_per_block`), "The total fee (gwei) paid by transactions in a block" (`importer_fee_per_block_gwei`), "The total number of transactions in a block" (`importer_transactions_per_block`), P2P metrics for swarm and protocol. +- [2340](https://github.com/FuelLabs/fuel-core/pull/2340): Avoid long heavy tasks in the GraphQL service by splitting work into batches. +- [2341](https://github.com/FuelLabs/fuel-core/pull/2341): Updated all pagination queries to work with the async stream instead of the sync iterator. +- [2350](https://github.com/FuelLabs/fuel-core/pull/2350): Limited the number of threads used by the GraphQL service. + +#### Breaking +- [2310](https://github.com/FuelLabs/fuel-core/pull/2310): The `metrics` command-line parameter has been replaced with `disable-metrics`. Metrics are now enabled by default, with the option to disable them entirely or on a per-module basis. +- [2341](https://github.com/FuelLabs/fuel-core/pull/2341): The maximum number of processed coins from the `coins_to_spend` query is limited to `max_inputs`.
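Reviewer note on the breaking `disable-metrics` change above: metrics flip from opt-in to opt-out, gated per module. A minimal, self-contained sketch of that gating semantics — the `Module` variants and `is_enabled` helper here are hypothetical stand-ins for the real types in `fuel-core-metrics::config` (`DisableConfig`/`Module`):

```rust
// Hypothetical sketch of per-module metrics gating; the real implementation
// lives in fuel_core_metrics::config and is exercised by the CLI tests below.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Module {
    All,
    TxPool,
    Importer,
    GraphQL,
    P2P,
    Producer,
}

// Metrics are enabled by default; a module is disabled only if it (or `All`)
// was collected from the comma-separated `--disable-metrics` values.
fn is_enabled(disabled: &[Module], module: Module) -> bool {
    !disabled.contains(&Module::All) && !disabled.contains(&module)
}

fn main() {
    // Equivalent of `--disable-metrics txpool,importer`.
    let disabled = vec![Module::TxPool, Module::Importer];
    assert!(!is_enabled(&disabled, Module::TxPool));
    assert!(is_enabled(&disabled, Module::P2P));
}
```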
+ ## [Version 0.39.0] ### Added diff --git a/Cargo.lock b/Cargo.lock index 0df5ae80960..bc6071f4082 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1330,9 +1330,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.1.29" +version = "1.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58e804ac3194a48bb129643eb1d62fcc20d18c6b8c181704489353d13120bcd1" +checksum = "b16803a61b81d9eabb7eae2588776c4c1e584b738ede45fdbb4c972cec1e9945" dependencies = [ "jobserver", "libc", @@ -3168,7 +3168,7 @@ dependencies = [ [[package]] name = "fuel-core" -version = "0.39.0" +version = "0.40.0" dependencies = [ "anyhow", "assert_matches", @@ -3197,7 +3197,7 @@ dependencies = [ "fuel-core-sync", "fuel-core-trace", "fuel-core-txpool", - "fuel-core-types 0.39.0", + "fuel-core-types 0.40.0", "fuel-core-upgradable-executor", "futures", "hex", @@ -3247,7 +3247,7 @@ dependencies = [ "fuel-core-services", "fuel-core-storage", "fuel-core-sync", - "fuel-core-types 0.39.0", + "fuel-core-types 0.40.0", "futures", "itertools 0.12.1", "num_enum", @@ -3268,11 +3268,11 @@ dependencies = [ [[package]] name = "fuel-core-bft" -version = "0.39.0" +version = "0.40.0" [[package]] name = "fuel-core-bin" -version = "0.39.0" +version = "0.40.0" dependencies = [ "anyhow", "aws-config", @@ -3284,9 +3284,10 @@ dependencies = [ "fuel-core", "fuel-core-chain-config", "fuel-core-compression", + "fuel-core-metrics", "fuel-core-poa", "fuel-core-storage", - "fuel-core-types 0.39.0", + "fuel-core-types 0.40.0", "hex", "humantime", "itertools 0.12.1", @@ -3296,6 +3297,7 @@ dependencies = [ "rand", "serde", "serde_json", + "strum 0.25.0", "tempfile", "test-case", "tikv-jemallocator", @@ -3307,7 +3309,7 @@ dependencies = [ [[package]] name = "fuel-core-chain-config" -version = "0.39.0" +version = "0.40.0" dependencies = [ "anyhow", "bech32", @@ -3315,7 +3317,7 @@ dependencies = [ "derivative", "fuel-core-chain-config", "fuel-core-storage", - "fuel-core-types 0.39.0", + "fuel-core-types 0.40.0", "insta", "itertools 0.12.1", "parquet", @@ -3333,14 +3335,14 @@ dependencies = [ [[package]] name = "fuel-core-client" -version = "0.39.0" +version = "0.40.0" dependencies = [ "anyhow", "base64 0.22.1", "cynic", "derive_more", "eventsource-client", - "fuel-core-types 0.39.0", + "fuel-core-types 0.40.0", "futures", "hex", "hyper-rustls", @@ -3357,22 +3359,22 @@ dependencies = [ [[package]] name = "fuel-core-client-bin" -version = "0.39.0" +version = "0.40.0" dependencies = [ "clap 4.5.20", "fuel-core-client", - "fuel-core-types 0.39.0", + "fuel-core-types 0.40.0", "serde_json", "tokio", ] [[package]] name = "fuel-core-compression" -version = "0.39.0" +version = "0.40.0" dependencies = [ "anyhow", "fuel-core-compression", - "fuel-core-types 0.39.0", + "fuel-core-types 0.40.0", "paste", "postcard", "proptest", @@ -3385,30 +3387,30 @@ dependencies = [ [[package]] name = "fuel-core-consensus-module" -version = "0.39.0" +version = "0.40.0" dependencies = [ "anyhow", "fuel-core-chain-config", "fuel-core-poa", "fuel-core-storage", - "fuel-core-types 0.39.0", + "fuel-core-types 0.40.0", "test-case", ] [[package]] name = "fuel-core-database" -version = "0.39.0" +version = "0.40.0" dependencies = [ "anyhow", "derive_more", "fuel-core-storage", "fuel-core-trace", - "fuel-core-types 0.39.0", + "fuel-core-types 0.40.0", ] [[package]] name = "fuel-core-e2e-client" -version = "0.39.0" +version = "0.40.0" dependencies = [ "anyhow", "assert_cmd", @@ -3416,7 +3418,7 @@ dependencies 
= [ "fuel-core-chain-config", "fuel-core-client", "fuel-core-trace", - "fuel-core-types 0.39.0", + "fuel-core-types 0.40.0", "futures", "hex", "humantime-serde", @@ -3433,12 +3435,12 @@ dependencies = [ [[package]] name = "fuel-core-executor" -version = "0.39.0" +version = "0.40.0" dependencies = [ "anyhow", "fuel-core-storage", "fuel-core-trace", - "fuel-core-types 0.39.0", + "fuel-core-types 0.40.0", "hex", "parking_lot", "serde", @@ -3447,14 +3449,14 @@ dependencies = [ [[package]] name = "fuel-core-gas-price-service" -version = "0.39.0" +version = "0.40.0" dependencies = [ "anyhow", "async-trait", "enum-iterator", "fuel-core-services", "fuel-core-storage", - "fuel-core-types 0.39.0", + "fuel-core-types 0.40.0", "fuel-gas-price-algorithm", "futures", "num_enum", @@ -3471,14 +3473,14 @@ dependencies = [ [[package]] name = "fuel-core-importer" -version = "0.39.0" +version = "0.40.0" dependencies = [ "anyhow", "derive_more", "fuel-core-metrics", "fuel-core-storage", "fuel-core-trace", - "fuel-core-types 0.39.0", + "fuel-core-types 0.40.0", "mockall", "parking_lot", "rayon", @@ -3489,18 +3491,18 @@ dependencies = [ [[package]] name = "fuel-core-keygen" -version = "0.39.0" +version = "0.40.0" dependencies = [ "anyhow", "clap 4.5.20", - "fuel-core-types 0.39.0", + "fuel-core-types 0.40.0", "libp2p-identity", "serde", ] [[package]] name = "fuel-core-keygen-bin" -version = "0.39.0" +version = "0.40.0" dependencies = [ "anyhow", "atty", @@ -3513,19 +3515,22 @@ dependencies = [ [[package]] name = "fuel-core-metrics" -version = "0.39.0" +version = "0.40.0" dependencies = [ + "once_cell", "parking_lot", "pin-project-lite", "prometheus-client", "regex", + "strum 0.25.0", + "strum_macros 0.25.3", "tokio", "tracing", ] [[package]] name = "fuel-core-p2p" -version = "0.39.0" +version = "0.40.0" dependencies = [ "anyhow", "async-trait", @@ -3536,7 +3541,7 @@ dependencies = [ "fuel-core-services", "fuel-core-storage", "fuel-core-trace", - "fuel-core-types 0.39.0", + "fuel-core-types 0.40.0", "futures", "hex", "hickory-resolver", @@ -3563,7 +3568,7 @@ dependencies = [ [[package]] name = "fuel-core-poa" -version = "0.39.0" +version = "0.40.0" dependencies = [ "anyhow", "async-trait", @@ -3574,7 +3579,7 @@ dependencies = [ "fuel-core-services", "fuel-core-storage", "fuel-core-trace", - "fuel-core-types 0.39.0", + "fuel-core-types 0.40.0", "k256", "mockall", "rand", @@ -3588,7 +3593,7 @@ dependencies = [ [[package]] name = "fuel-core-producer" -version = "0.39.0" +version = "0.40.0" dependencies = [ "anyhow", "async-trait", @@ -3596,7 +3601,7 @@ dependencies = [ "fuel-core-producer", "fuel-core-storage", "fuel-core-trace", - "fuel-core-types 0.39.0", + "fuel-core-types 0.40.0", "mockall", "proptest", "rand", @@ -3607,7 +3612,7 @@ dependencies = [ [[package]] name = "fuel-core-relayer" -version = "0.39.0" +version = "0.40.0" dependencies = [ "anyhow", "async-trait", @@ -3621,7 +3626,7 @@ dependencies = [ "fuel-core-services", "fuel-core-storage", "fuel-core-trace", - "fuel-core-types 0.39.0", + "fuel-core-types 0.40.0", "futures", "mockall", "once_cell", @@ -3640,7 +3645,7 @@ dependencies = [ [[package]] name = "fuel-core-services" -version = "0.39.0" +version = "0.40.0" dependencies = [ "anyhow", "async-trait", @@ -3649,6 +3654,7 @@ dependencies = [ "futures", "mockall", "parking_lot", + "pin-project-lite", "rayon", "tokio", "tracing", @@ -3656,13 +3662,13 @@ dependencies = [ [[package]] name = "fuel-core-storage" -version = "0.39.0" +version = "0.40.0" dependencies = [ "anyhow", "derive_more", 
"enum-iterator", "fuel-core-storage", - "fuel-core-types 0.39.0", + "fuel-core-types 0.40.0", "fuel-vm 0.58.2", "impl-tools", "itertools 0.12.1", @@ -3680,13 +3686,13 @@ dependencies = [ [[package]] name = "fuel-core-sync" -version = "0.39.0" +version = "0.40.0" dependencies = [ "anyhow", "async-trait", "fuel-core-services", "fuel-core-trace", - "fuel-core-types 0.39.0", + "fuel-core-types 0.40.0", "futures", "mockall", "rand", @@ -3721,7 +3727,7 @@ dependencies = [ "fuel-core-storage", "fuel-core-trace", "fuel-core-txpool", - "fuel-core-types 0.39.0", + "fuel-core-types 0.40.0", "fuel-core-upgradable-executor", "futures", "hex", @@ -3747,7 +3753,7 @@ dependencies = [ [[package]] name = "fuel-core-trace" -version = "0.39.0" +version = "0.40.0" dependencies = [ "ctor", "tracing", @@ -3757,7 +3763,7 @@ dependencies = [ [[package]] name = "fuel-core-txpool" -version = "0.39.0" +version = "0.40.0" dependencies = [ "anyhow", "async-trait", @@ -3766,7 +3772,7 @@ dependencies = [ "fuel-core-storage", "fuel-core-trace", "fuel-core-txpool", - "fuel-core-types 0.39.0", + "fuel-core-types 0.40.0", "futures", "mockall", "num-rational", @@ -3798,7 +3804,7 @@ dependencies = [ [[package]] name = "fuel-core-types" -version = "0.39.0" +version = "0.40.0" dependencies = [ "anyhow", "bs58", @@ -3814,13 +3820,13 @@ dependencies = [ [[package]] name = "fuel-core-upgradable-executor" -version = "0.39.0" +version = "0.40.0" dependencies = [ "anyhow", "derive_more", "fuel-core-executor", "fuel-core-storage", - "fuel-core-types 0.39.0", + "fuel-core-types 0.40.0", "fuel-core-wasm-executor", "ntest", "parking_lot", @@ -3831,13 +3837,13 @@ dependencies = [ [[package]] name = "fuel-core-wasm-executor" -version = "0.39.0" +version = "0.40.0" dependencies = [ "anyhow", "fuel-core-executor", "fuel-core-storage", "fuel-core-types 0.35.0", - "fuel-core-types 0.39.0", + "fuel-core-types 0.40.0", "postcard", "proptest", "serde", @@ -3907,7 +3913,7 @@ dependencies = [ [[package]] name = "fuel-gas-price-algorithm" -version = "0.39.0" +version = "0.40.0" dependencies = [ "proptest", "rand", @@ -6714,9 +6720,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.13" +version = "2.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdbef9d1d47087a895abd220ed25eb4ad973a5e26f6a4367b038c25e28dfc2d9" +checksum = "879952a81a83930934cbf1786752d6dedc3b1f29e8f8fb2ad1d0a36f377cf442" dependencies = [ "memchr", "thiserror", @@ -7866,9 +7872,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" +checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" [[package]] name = "rusty-fork" @@ -8815,7 +8821,7 @@ dependencies = [ "fuel-core-storage", "fuel-core-trace", "fuel-core-txpool", - "fuel-core-types 0.39.0", + "fuel-core-types 0.40.0", "futures", "itertools 0.12.1", "rand", diff --git a/Cargo.toml b/Cargo.toml index 10230180a1f..d80bf4f5bdc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -51,40 +51,40 @@ homepage = "https://fuel.network/" keywords = ["blockchain", "cryptocurrencies", "fuel-vm", "vm"] license = "BUSL-1.1" repository = "https://github.com/FuelLabs/fuel-core" -version = "0.39.0" +version = "0.40.0" [workspace.dependencies] # Workspace members -fuel-core = { version = "0.39.0", path = "./crates/fuel-core", default-features = false 
} -fuel-core-client-bin = { version = "0.39.0", path = "./bin/fuel-core-client" } -fuel-core-bin = { version = "0.39.0", path = "./bin/fuel-core" } -fuel-core-keygen = { version = "0.39.0", path = "./crates/keygen" } -fuel-core-keygen-bin = { version = "0.39.0", path = "./bin/keygen" } -fuel-core-chain-config = { version = "0.39.0", path = "./crates/chain-config", default-features = false } -fuel-core-client = { version = "0.39.0", path = "./crates/client" } -fuel-core-compression = { version = "0.39.0", path = "./crates/compression" } -fuel-core-database = { version = "0.39.0", path = "./crates/database" } -fuel-core-metrics = { version = "0.39.0", path = "./crates/metrics" } -fuel-core-services = { version = "0.39.0", path = "./crates/services" } -fuel-core-consensus-module = { version = "0.39.0", path = "./crates/services/consensus_module" } -fuel-core-bft = { version = "0.39.0", path = "./crates/services/consensus_module/bft" } -fuel-core-poa = { version = "0.39.0", path = "./crates/services/consensus_module/poa" } -fuel-core-executor = { version = "0.39.0", path = "./crates/services/executor", default-features = false } -fuel-core-importer = { version = "0.39.0", path = "./crates/services/importer" } -fuel-core-gas-price-service = { version = "0.39.0", path = "crates/services/gas_price_service" } -fuel-core-p2p = { version = "0.39.0", path = "./crates/services/p2p" } -fuel-core-producer = { version = "0.39.0", path = "./crates/services/producer" } -fuel-core-relayer = { version = "0.39.0", path = "./crates/services/relayer" } -fuel-core-sync = { version = "0.39.0", path = "./crates/services/sync" } -fuel-core-txpool = { version = "0.39.0", path = "./crates/services/txpool_v2" } -fuel-core-storage = { version = "0.39.0", path = "./crates/storage", default-features = false } -fuel-core-trace = { version = "0.39.0", path = "./crates/trace" } -fuel-core-types = { version = "0.39.0", path = "./crates/types", default-features = false } +fuel-core = { version = "0.40.0", path = "./crates/fuel-core", default-features = false } +fuel-core-client-bin = { version = "0.40.0", path = "./bin/fuel-core-client" } +fuel-core-bin = { version = "0.40.0", path = "./bin/fuel-core" } +fuel-core-keygen = { version = "0.40.0", path = "./crates/keygen" } +fuel-core-keygen-bin = { version = "0.40.0", path = "./bin/keygen" } +fuel-core-chain-config = { version = "0.40.0", path = "./crates/chain-config", default-features = false } +fuel-core-client = { version = "0.40.0", path = "./crates/client" } +fuel-core-compression = { version = "0.40.0", path = "./crates/compression" } +fuel-core-database = { version = "0.40.0", path = "./crates/database" } +fuel-core-metrics = { version = "0.40.0", path = "./crates/metrics" } +fuel-core-services = { version = "0.40.0", path = "./crates/services" } +fuel-core-consensus-module = { version = "0.40.0", path = "./crates/services/consensus_module" } +fuel-core-bft = { version = "0.40.0", path = "./crates/services/consensus_module/bft" } +fuel-core-poa = { version = "0.40.0", path = "./crates/services/consensus_module/poa" } +fuel-core-executor = { version = "0.40.0", path = "./crates/services/executor", default-features = false } +fuel-core-importer = { version = "0.40.0", path = "./crates/services/importer" } +fuel-core-gas-price-service = { version = "0.40.0", path = "crates/services/gas_price_service" } +fuel-core-p2p = { version = "0.40.0", path = "./crates/services/p2p" } +fuel-core-producer = { version = "0.40.0", path = "./crates/services/producer" } +fuel-core-relayer 
= { version = "0.40.0", path = "./crates/services/relayer" } +fuel-core-sync = { version = "0.40.0", path = "./crates/services/sync" } +fuel-core-txpool = { version = "0.40.0", path = "./crates/services/txpool_v2" } +fuel-core-storage = { version = "0.40.0", path = "./crates/storage", default-features = false } +fuel-core-trace = { version = "0.40.0", path = "./crates/trace" } +fuel-core-types = { version = "0.40.0", path = "./crates/types", default-features = false } fuel-core-tests = { version = "0.0.0", path = "./tests" } -fuel-core-upgradable-executor = { version = "0.39.0", path = "./crates/services/upgradable-executor" } -fuel-core-wasm-executor = { version = "0.39.0", path = "./crates/services/upgradable-executor/wasm-executor", default-features = false } +fuel-core-upgradable-executor = { version = "0.40.0", path = "./crates/services/upgradable-executor" } +fuel-core-wasm-executor = { version = "0.40.0", path = "./crates/services/upgradable-executor/wasm-executor", default-features = false } fuel-core-xtask = { version = "0.0.0", path = "./xtask" } -fuel-gas-price-algorithm = { version = "0.39.0", path = "crates/fuel-gas-price-algorithm" } +fuel-gas-price-algorithm = { version = "0.40.0", path = "crates/fuel-gas-price-algorithm" } # Fuel dependencies fuel-vm-private = { version = "0.58.2", package = "fuel-vm", default-features = false } diff --git a/benches/benches/transaction_throughput.rs b/benches/benches/transaction_throughput.rs index 23454dda359..5d78818e8da 100644 --- a/benches/benches/transaction_throughput.rs +++ b/benches/benches/transaction_throughput.rs @@ -89,6 +89,7 @@ where test_builder.trigger = Trigger::Never; test_builder.utxo_validation = true; test_builder.gas_limit = Some(10_000_000_000); + test_builder.block_size_limit = Some(1_000_000_000_000); // spin up node let transactions: Vec = diff --git a/bin/fuel-core/Cargo.toml b/bin/fuel-core/Cargo.toml index aa111e7637c..76bbb6d0367 100644 --- a/bin/fuel-core/Cargo.toml +++ b/bin/fuel-core/Cargo.toml @@ -28,6 +28,7 @@ dotenvy = { version = "0.15", optional = true } fuel-core = { workspace = true, features = ["wasm-executor"] } fuel-core-chain-config = { workspace = true } fuel-core-compression = { workspace = true } +fuel-core-metrics = { workspace = true } fuel-core-poa = { workspace = true } fuel-core-types = { workspace = true, features = ["std"] } hex = { workspace = true } @@ -53,6 +54,7 @@ itertools = { workspace = true } pretty_assertions = { workspace = true } rand = { workspace = true } serde = { workspace = true } +strum = { workspace = true } tempfile = { workspace = true } test-case = { workspace = true } diff --git a/bin/fuel-core/chainspec/local-testnet/chain_config.json b/bin/fuel-core/chainspec/local-testnet/chain_config.json index 342086aaa1d..2977f4b4b08 100644 --- a/bin/fuel-core/chainspec/local-testnet/chain_config.json +++ b/bin/fuel-core/chainspec/local-testnet/chain_config.json @@ -297,7 +297,7 @@ "privileged_address": "9f0e19d6c2a6283a3222426ab2630d35516b1799b503f37b02105bebe1b8a3e9" } }, - "genesis_state_transition_version": 15, + "genesis_state_transition_version": 16, "consensus": { "PoAV2": { "genesis_signing_key": "e0a9fcde1b73f545252e01b30b50819eb9547d07531fa3df0385c5695736634d", diff --git a/bin/fuel-core/src/cli/run.rs b/bin/fuel-core/src/cli/run.rs index ab3523dec1a..6dc0349f4f8 100644 --- a/bin/fuel-core/src/cli/run.rs +++ b/bin/fuel-core/src/cli/run.rs @@ -22,6 +22,7 @@ use fuel_core::{ }, fuel_core_graphql_api::{ worker_service::DaCompressionConfig, + Costs, ServiceConfig as 
GraphQLConfig, }, producer::Config as ProducerConfig, }, @@ -50,6 +51,10 @@ use fuel_core_chain_config::{ SnapshotMetadata, SnapshotReader, }; +use fuel_core_metrics::config::{ + DisableConfig, + Module, +}; use fuel_core_poa::signer::SignMode; use fuel_core_types::blockchain::header::StateTransitionBytecodeVersion; use pyroscope::{ @@ -235,8 +240,8 @@ pub struct Command { #[cfg(feature = "p2p")] pub sync_args: p2p::SyncArgs, - #[arg(long = "metrics", env)] - pub metrics: bool, + #[arg(long = "disable-metrics", value_delimiter = ',', help = fuel_core_metrics::config::help_string(), env)] + pub disabled_metrics: Vec<Module>, #[clap(long = "verify-max-da-lag", default_value = "10", env)] pub max_da_lag: u64, @@ -293,7 +298,7 @@ impl Command { p2p_args, #[cfg(feature = "p2p")] sync_args, - metrics, + disabled_metrics: metrics, max_da_lag, max_wait_time, tx_pool, @@ -304,6 +309,14 @@ impl Command { profiling: _, } = self; + let enabled_metrics = metrics.list_of_enabled(); + + if !enabled_metrics.is_empty() { + info!("`{:?}` metrics are enabled", enabled_metrics); + } else { + info!("All metrics are disabled"); + } + let addr = net::SocketAddr::new(graphql.ip, graphql.port); let snapshot_reader = match snapshot.as_ref() { @@ -319,7 +332,10 @@ impl Command { let relayer_cfg = relayer_args.into_config(); #[cfg(feature = "p2p")] - let p2p_cfg = p2p_args.into_config(chain_config.chain_name.clone(), metrics)?; + let p2p_cfg = p2p_args.into_config( + chain_config.chain_name.clone(), + metrics.is_enabled(Module::P2P), + )?; let trigger: Trigger = poa_trigger.into(); @@ -427,8 +443,9 @@ impl Command { state_rewind_policy, }; - let block_importer = - fuel_core::service::config::fuel_core_importer::Config::new(); + let block_importer = fuel_core::service::config::fuel_core_importer::Config::new( + metrics.is_enabled(Module::Importer), + ); let da_compression = match da_compression { Some(retention) => { @@ -488,6 +505,8 @@ impl Command { let config = Config { graphql_config: GraphQLConfig { addr, + number_of_threads: graphql.graphql_number_of_threads, + database_batch_size: graphql.database_batch_size, max_queries_depth: graphql.graphql_max_depth, max_queries_complexity: graphql.graphql_max_complexity, max_queries_recursive_depth: graphql.graphql_max_recursive_depth, @@ -498,6 +517,29 @@ impl Command { request_body_bytes_limit: graphql.graphql_request_body_bytes_limit, api_request_timeout: graphql.api_request_timeout.into(), query_log_threshold_time: graphql.query_log_threshold_time.into(), + costs: Costs { + balance_query: graphql.costs.balance_query, + coins_to_spend: graphql.costs.coins_to_spend, + get_peers: graphql.costs.get_peers, + estimate_predicates: graphql.costs.estimate_predicates, + dry_run: graphql.costs.dry_run, + submit: graphql.costs.submit, + submit_and_await: graphql.costs.submit_and_await, + status_change: graphql.costs.status_change, + storage_read: graphql.costs.storage_read, + tx_get: graphql.costs.tx_get, + tx_status_read: graphql.costs.tx_status_read, + tx_raw_payload: graphql.costs.tx_raw_payload, + block_header: graphql.costs.block_header, + block_transactions: graphql.costs.block_transactions, + block_transactions_ids: graphql.costs.block_transactions_ids, + storage_iterator: graphql.costs.storage_iterator, + bytecode_read: graphql.costs.bytecode_read, + state_transition_bytecode_read: graphql + .costs + .state_transition_bytecode_read, + da_compressed_block_read: graphql.costs.da_compressed_block_read, + }, }, combined_db_config, snapshot_reader, @@ -523,7 +565,7 @@ impl Command { },
block_producer: ProducerConfig { coinbase_recipient, - metrics, + metrics: metrics.is_enabled(Module::Producer), }, starting_gas_price, gas_price_change_percent, @@ -633,3 +675,85 @@ fn start_pyroscope_agent( }) .transpose() } + +#[cfg(test)] +#[allow(non_snake_case)] +#[allow(clippy::bool_assert_comparison)] +mod tests { + use super::*; + use strum::IntoEnumIterator; + + fn parse_command(args: &[&str]) -> anyhow::Result<Command> { + Ok(Command::try_parse_from([""].iter().chain(args))?) + } + + #[test] + fn parse_disabled_metrics__no_value_enables_everything() { + // Given + let args = []; + + // When + let command = parse_command(&args).unwrap(); + + // Then + let config = command.disabled_metrics; + Module::iter().for_each(|module| { + assert_eq!(config.is_enabled(module), true); + }); + } + + #[test] + fn parse_disabled_metrics__all() { + // Given + let args = ["--disable-metrics", "all"]; + + // When + let command = parse_command(&args).unwrap(); + + // Then + let config = command.disabled_metrics; + Module::iter().for_each(|module| { + assert_eq!(config.is_enabled(module), false); + }); + } + + #[test] + fn parse_disabled_metrics__mixed_args() { + // Given + let args = [ + "--disable-metrics", + "txpool,importer", + "--disable-metrics", + "graphql", + ]; + + // When + let command = parse_command(&args).unwrap(); + + // Then + let config = command.disabled_metrics; + assert_eq!(config.is_enabled(Module::TxPool), false); + assert_eq!(config.is_enabled(Module::Importer), false); + assert_eq!(config.is_enabled(Module::GraphQL), false); + assert_eq!(config.is_enabled(Module::P2P), true); + assert_eq!(config.is_enabled(Module::Producer), true); + } + + #[test] + fn parse_disabled_metrics__bad_values() { + // Given + let args = ["--disable-metrics", "txpool,alpha,bravo"]; + + // When + let command = parse_command(&args); + + // Then + let err = command.expect_err("should fail to parse"); + assert_eq!( + err.to_string(), + "error: invalid value 'alpha' for \ '--disable-metrics <DISABLED_METRICS>': Matching variant not found\ \n\nFor more information, try '--help'.\n" + ); + } +} diff --git a/bin/fuel-core/src/cli/run/graphql.rs b/bin/fuel-core/src/cli/run/graphql.rs index 95cfb2852f5..5816b2ecbc0 100644 --- a/bin/fuel-core/src/cli/run/graphql.rs +++ b/bin/fuel-core/src/cli/run/graphql.rs @@ -2,6 +2,8 @@ use std::net; +use fuel_core::fuel_core_graphql_api::DEFAULT_QUERY_COSTS; + #[derive(Debug, Clone, clap::Args)] pub struct GraphQLArgs { /// The IP address to bind the GraphQL service to. @@ -12,6 +14,14 @@ pub struct GraphQLArgs { #[clap(long = "port", default_value = "4000", env)] pub port: u16, + /// The number of threads to use for the GraphQL service. + #[clap(long = "graphql-number-of-threads", default_value = "2", env)] + pub graphql_number_of_threads: usize, + + /// The size of the batch fetched from the database by the GraphQL service. + #[clap(long = "graphql-database-batch-size", default_value = "100", env)] + pub database_batch_size: usize, + /// The max depth of GraphQL queries. #[clap(long = "graphql-max-depth", default_value = "16", env)] pub graphql_max_depth: usize, @@ -55,4 +65,162 @@ pub struct GraphQLArgs { /// Timeout before dropping the request. #[clap(long = "api-request-timeout", default_value = "30s", env)] pub api_request_timeout: humantime::Duration, + + #[clap(flatten)] + pub costs: QueryCosts, +} + +/// Costs for individual GraphQL queries. +#[derive(Debug, Clone, clap::Args)] +pub struct QueryCosts { + /// Query costs for getting balances.
+ #[clap( + long = "query-cost-balance-query", + default_value = DEFAULT_QUERY_COSTS.balance_query.to_string(), + env + )] + pub balance_query: usize, + + /// Query costs for getting coins to spend. + #[clap( + long = "query-cost-coins-to-spend", + default_value = DEFAULT_QUERY_COSTS.coins_to_spend.to_string(), + env)] + pub coins_to_spend: usize, + + /// Query costs for getting peers. + #[clap( + long = "query-cost-get-peers", + default_value = DEFAULT_QUERY_COSTS.get_peers.to_string(), + env + )] + pub get_peers: usize, + + /// Query costs for estimating predicates. + #[clap( + long = "query-cost-estimate-predicates", + default_value = DEFAULT_QUERY_COSTS.estimate_predicates.to_string(), + env + )] + pub estimate_predicates: usize, + + /// Query costs for dry running a set of transactions. + #[clap( + long = "query-cost-dry-run", + default_value = DEFAULT_QUERY_COSTS.dry_run.to_string(), + env + )] + pub dry_run: usize, + + /// Query costs for submitting a transaction. + #[clap( + long = "query-cost-submit", + default_value = DEFAULT_QUERY_COSTS.submit.to_string(), + env + )] + pub submit: usize, + + /// Query costs for submitting and awaiting a transaction. + #[clap( + long = "query-cost-submit-and-await", + default_value = DEFAULT_QUERY_COSTS.submit_and_await.to_string(), + env + )] + pub submit_and_await: usize, + + /// Query costs for the status change query. + #[clap( + long = "query-cost-status-change", + default_value = DEFAULT_QUERY_COSTS.status_change.to_string(), + env + )] + pub status_change: usize, + + /// Query costs for reading from storage. + #[clap( + long = "query-cost-storage-read", + default_value = DEFAULT_QUERY_COSTS.storage_read.to_string(), + env + )] + pub storage_read: usize, + + /// Query costs for getting a transaction. + #[clap( + long = "query-cost-tx-get", + default_value = DEFAULT_QUERY_COSTS.tx_get.to_string(), + env + )] + pub tx_get: usize, + + /// Query costs for reading tx status. + #[clap( + long = "query-cost-tx-status-read", + default_value = DEFAULT_QUERY_COSTS.tx_status_read.to_string(), + env + )] + pub tx_status_read: usize, + + /// Query costs for getting the raw tx payload. + #[clap( + long = "query-cost-tx-raw-payload", + default_value = DEFAULT_QUERY_COSTS.tx_raw_payload.to_string(), + env + )] + pub tx_raw_payload: usize, + + /// Query costs for block header. + #[clap( + long = "query-cost-block-header", + default_value = DEFAULT_QUERY_COSTS.block_header.to_string(), + env + )] + pub block_header: usize, + + /// Query costs for block transactions. + #[clap( + long = "query-cost-block-transactions", + default_value = DEFAULT_QUERY_COSTS.block_transactions.to_string(), + env + )] + pub block_transactions: usize, + + /// Query costs for block transactions ids. + #[clap( + long = "query-cost-block-transactions-ids", + default_value = DEFAULT_QUERY_COSTS.block_transactions_ids.to_string(), + env + )] + pub block_transactions_ids: usize, + + /// Query costs for iterating over storage entries. + #[clap( + long = "query-cost-storage-iterator", + default_value = DEFAULT_QUERY_COSTS.storage_iterator.to_string(), + env + )] + pub storage_iterator: usize, + + /// Query costs for reading bytecode. + #[clap( + long = "query-cost-bytecode-read", + default_value = DEFAULT_QUERY_COSTS.bytecode_read.to_string(), + env + )] + pub bytecode_read: usize, + + /// Query costs for reading state transition bytecode. 
+ #[clap( + long = "query-cost-state-transition-bytecode-read", + default_value = DEFAULT_QUERY_COSTS.state_transition_bytecode_read.to_string(), + env + )] + pub state_transition_bytecode_read: usize, + + /// Query costs for reading a DA compressed block. + #[clap( + long = "query-cost-da-compressed-block-read", + default_value = DEFAULT_QUERY_COSTS.da_compressed_block_read.to_string(), + env + )] + pub da_compressed_block_read: usize, } diff --git a/crates/chain-config/src/config/snapshots/fuel_core_chain_config__config__chain__tests__snapshot_local_testnet_config.snap b/crates/chain-config/src/config/snapshots/fuel_core_chain_config__config__chain__tests__snapshot_local_testnet_config.snap index 96191c01131..c3123c6bb38 100644 --- a/crates/chain-config/src/config/snapshots/fuel_core_chain_config__config__chain__tests__snapshot_local_testnet_config.snap +++ b/crates/chain-config/src/config/snapshots/fuel_core_chain_config__config__chain__tests__snapshot_local_testnet_config.snap @@ -301,7 +301,7 @@ expression: json "privileged_address": "0000000000000000000000000000000000000000000000000000000000000000" } }, - "genesis_state_transition_version": 15, + "genesis_state_transition_version": 16, "consensus": { "PoAV2": { "genesis_signing_key": "22ec92c3105c942a6640bdc4e4907286ec4728e8cfc0d8ac59aad4d8e1ccaefb", diff --git a/crates/client/assets/schema.sdl b/crates/client/assets/schema.sdl index 67287341c38..b9048362caa 100644 --- a/crates/client/assets/schema.sdl +++ b/crates/client/assets/schema.sdl @@ -938,7 +938,7 @@ type Query { """ owner: Address!, """ - The list of requested assets' coins with asset ids, `target` amount the user wants to reach, and the `max` number of coins in the selection. Several entries with the same asset id are not allowed. + The list of requested assets' coins with asset ids, `target` amount the user wants to reach, and the `max` number of coins in the selection. Several entries with the same asset id are not allowed. The result can't contain more coins than `max_inputs`. """ queryPerAsset: [SpendQueryElementInput!]!, """ diff --git a/crates/fuel-core/src/coins_query.rs b/crates/fuel-core/src/coins_query.rs index 532b67a1bb5..3d7abda01bc 100644 --- a/crates/fuel-core/src/coins_query.rs +++ b/crates/fuel-core/src/coins_query.rs @@ -19,7 +19,7 @@ use fuel_core_types::{ Word, }, }; -use itertools::Itertools; +use futures::TryStreamExt; use rand::prelude::*; use std::{ cmp::Reverse, @@ -119,8 +119,13 @@ impl SpendQuery { /// Returns the biggest inputs of the `owner` to satisfy the required `target` of the asset. The /// number of inputs for each asset can't exceed `max_inputs`, otherwise throw an error that the query /// can't be satisfied.
-pub fn largest_first(query: &AssetQuery) -> Result, CoinsQueryError> { - let mut inputs: Vec<_> = query.coins().try_collect()?; +pub async fn largest_first( + query: AssetQuery<'_>, +) -> Result, CoinsQueryError> { + let target = query.asset.target; + let max = query.asset.max; + let asset_id = query.asset.id; + let mut inputs: Vec = query.coins().try_collect().await?; inputs.sort_by_key(|coin| Reverse(coin.amount())); let mut collected_amount = 0u64; @@ -128,12 +133,12 @@ pub fn largest_first(query: &AssetQuery) -> Result, CoinsQueryErro for coin in inputs { // Break if we don't need any more coins - if collected_amount >= query.asset.target { + if collected_amount >= target { break } // Error if we can't fit more coins - if coins.len() >= query.asset.max { + if coins.len() >= max as usize { return Err(CoinsQueryError::MaxCoinsReached) } @@ -142,9 +147,9 @@ pub fn largest_first(query: &AssetQuery) -> Result, CoinsQueryErro coins.push(coin); } - if collected_amount < query.asset.target { + if collected_amount < target { return Err(CoinsQueryError::InsufficientCoins { - asset_id: query.asset.id, + asset_id, collected_amount, }) } @@ -153,23 +158,32 @@ pub fn largest_first(query: &AssetQuery) -> Result, CoinsQueryErro } // An implementation of the method described on: https://iohk.io/en/blog/posts/2018/07/03/self-organisation-in-coin-selection/ -pub fn random_improve( +// TODO: Reimplement this algorithm to be simpler and faster: +// Instead of selecting random coins first, we can sort them. +// After that, we can split the coins into the part that covers the +// target and the part that does not(by choosing the most expensive coins). +// When the target is satisfied, we can select random coins from the remaining +// coins not used in the target. +// https://github.com/FuelLabs/fuel-core/issues/1965 +pub async fn random_improve( db: &ReadView, spend_query: &SpendQuery, ) -> Result>, CoinsQueryError> { let mut coins_per_asset = vec![]; for query in spend_query.asset_queries(db) { - let mut inputs: Vec<_> = query.coins().try_collect()?; + let target = query.asset.target; + let max = query.asset.max; + + let mut inputs: Vec<_> = query.clone().coins().try_collect().await?; inputs.shuffle(&mut thread_rng()); - inputs.truncate(query.asset.max); + inputs.truncate(max as usize); let mut collected_amount = 0; let mut coins = vec![]; // Set parameters according to spec - let target = query.asset.target; - let upper_target = query.asset.target.saturating_mul(2); + let upper_target = target.saturating_mul(2); for coin in inputs { // Try to improve the result by adding dust to the result. 
@@ -197,8 +211,8 @@ pub fn random_improve( } // Fallback to largest_first if we can't fit more coins - if collected_amount < query.asset.target { - swap(&mut coins, &mut largest_first(&query)?); + if collected_amount < target { + swap(&mut coins, &mut largest_first(query).await?); } coins_per_asset.push(coins); @@ -266,6 +280,7 @@ mod tests { fuel_asm::Word, fuel_tx::*, }; + use futures::TryStreamExt; use itertools::Itertools; use rand::{ rngs::StdRng, @@ -324,34 +339,36 @@ mod tests { mod largest_first { use super::*; - fn query( + async fn query( spend_query: &[AssetSpendTarget], owner: &Address, base_asset_id: &AssetId, db: &ServiceDatabase, ) -> Result<Vec<Vec<(AssetId, Word)>>, CoinsQueryError> { - let result: Vec<_> = spend_query - .iter() - .map(|asset| { - largest_first(&AssetQuery::new( - owner, - asset, - base_asset_id, - None, - &db.test_view(), - )) - .map(|coins| { - coins - .iter() - .map(|coin| (*coin.asset_id(base_asset_id), coin.amount())) - .collect() - }) - }) - .try_collect()?; - Ok(result) + let mut results = vec![]; + + for asset in spend_query { + let coins = largest_first(AssetQuery::new( + owner, + asset, + base_asset_id, + None, + &db.test_view(), + )) + .await + .map(|coins| { + coins + .iter() + .map(|coin| (*coin.asset_id(base_asset_id), coin.amount())) + .collect() + })?; + results.push(coins); + } + + Ok(results) } - fn single_asset_assert( + async fn single_asset_assert( owner: Address, asset_ids: &[AssetId], base_asset_id: &AssetId, @@ -362,11 +379,12 @@ mod tests { // Query some targets, including higher than the owner's balance for target in 0..20 { let coins = query( - &[AssetSpendTarget::new(asset_id, target, usize::MAX)], + &[AssetSpendTarget::new(asset_id, target, u16::MAX)], &owner, base_asset_id, &db.service_database(), - ); + ) + .await; // Transform result for convenience let coins = coins.map(|coins| { @@ -425,32 +443,33 @@ mod tests { &owner, base_asset_id, &db.service_database(), - ); + ) + .await; assert_matches!(coins, Err(CoinsQueryError::MaxCoinsReached)); } - #[test] - fn single_asset_coins() { + #[tokio::test] + async fn single_asset_coins() { // Setup for coins let (owner, asset_ids, base_asset_id, db) = setup_coins(); - single_asset_assert(owner, &asset_ids, &base_asset_id, db); + single_asset_assert(owner, &asset_ids, &base_asset_id, db).await; } - #[test] - fn single_asset_messages() { + #[tokio::test] + async fn single_asset_messages() { // Setup for messages let (owner, base_asset_id, db) = setup_messages(); - single_asset_assert(owner, &[base_asset_id], &base_asset_id, db); + single_asset_assert(owner, &[base_asset_id], &base_asset_id, db).await; } - #[test] - fn single_asset_coins_and_messages() { + #[tokio::test] + async fn single_asset_coins_and_messages() { // Setup for coins and messages let (owner, asset_ids, base_asset_id, db) = setup_coins_and_messages(); - single_asset_assert(owner, &asset_ids, &base_asset_id, db); + single_asset_assert(owner, &asset_ids, &base_asset_id, db).await; } - fn multiple_assets_helper( + async fn multiple_assets_helper( owner: Address, asset_ids: &[AssetId], base_asset_id: &AssetId, @@ -458,13 +477,14 @@ mod tests { ) { let coins = query( &[ - AssetSpendTarget::new(asset_ids[0], 3, usize::MAX), - AssetSpendTarget::new(asset_ids[1], 6, usize::MAX), + AssetSpendTarget::new(asset_ids[0], 3, u16::MAX), + AssetSpendTarget::new(asset_ids[1], 6, u16::MAX), ], &owner, base_asset_id, &db.service_database(), - ); + ) + .await; let expected = vec![ vec![(asset_ids[0], 5)], vec![(asset_ids[1], 5), (asset_ids[1], 4)], ]; @@ -472,25
+492,25 @@ mod tests { assert_matches!(coins, Ok(coins) if coins == expected); } - #[test] - fn multiple_assets_coins() { + #[tokio::test] + async fn multiple_assets_coins() { // Setup coins let (owner, asset_ids, base_asset_id, db) = setup_coins(); - multiple_assets_helper(owner, &asset_ids, &base_asset_id, db); + multiple_assets_helper(owner, &asset_ids, &base_asset_id, db).await; } - #[test] - fn multiple_assets_coins_and_messages() { + #[tokio::test] + async fn multiple_assets_coins_and_messages() { // Setup coins and messages let (owner, asset_ids, base_asset_id, db) = setup_coins_and_messages(); - multiple_assets_helper(owner, &asset_ids, &base_asset_id, db); + multiple_assets_helper(owner, &asset_ids, &base_asset_id, db).await; } } mod random_improve { use super::*; - fn query( + async fn query( query_per_asset: Vec<AssetSpendTarget>, owner: Address, asset_ids: &[AssetId], @@ -500,7 +520,8 @@ mod tests { let coins = random_improve( &db.test_view(), &SpendQuery::new(owner, &query_per_asset, None, base_asset_id)?, - ); + ) + .await; // Transform result for convenience coins.map(|coins| { @@ -521,7 +542,7 @@ mod tests { }) } - fn single_asset_assert( + async fn single_asset_assert( owner: Address, asset_ids: &[AssetId], base_asset_id: AssetId, @@ -532,12 +553,13 @@ mod tests { // Query some amounts, including higher than the owner's balance for amount in 0..20 { let coins = query( - vec![AssetSpendTarget::new(asset_id, amount, usize::MAX)], + vec![AssetSpendTarget::new(asset_id, amount, u16::MAX)], owner, asset_ids, base_asset_id, &db.service_database(), - ); + ) + .await; // Transform result for convenience let coins = coins.map(|coins| { @@ -589,32 +611,33 @@ mod tests { asset_ids, base_asset_id, &db.service_database(), - ); + ) + .await; assert_matches!(coins, Err(CoinsQueryError::MaxCoinsReached)); } - #[test] - fn single_asset_coins() { + #[tokio::test] + async fn single_asset_coins() { // Setup for coins let (owner, asset_ids, base_asset_id, db) = setup_coins(); - single_asset_assert(owner, &asset_ids, base_asset_id, db); + single_asset_assert(owner, &asset_ids, base_asset_id, db).await; } - #[test] - fn single_asset_messages() { + #[tokio::test] + async fn single_asset_messages() { // Setup for messages let (owner, base_asset_id, db) = setup_messages(); - single_asset_assert(owner, &[base_asset_id], base_asset_id, db); + single_asset_assert(owner, &[base_asset_id], base_asset_id, db).await; } - #[test] - fn single_asset_coins_and_messages() { + #[tokio::test] + async fn single_asset_coins_and_messages() { // Setup for coins and messages let (owner, asset_ids, base_asset_id, db) = setup_coins_and_messages(); - single_asset_assert(owner, &asset_ids, base_asset_id, db); + single_asset_assert(owner, &asset_ids, base_asset_id, db).await; } - fn multiple_assets_assert( + async fn multiple_assets_assert( owner: Address, asset_ids: &[AssetId], base_asset_id: AssetId, @@ -638,7 +661,8 @@ mod tests { asset_ids, base_asset_id, &db.service_database(), - ); + ) + .await; assert_matches!(coins, Ok(ref coins) if coins.len() <= 6); let coins = coins.unwrap(); assert!( @@ -659,18 +683,18 @@ mod tests { ); } - #[test] - fn multiple_assets_coins() { + #[tokio::test] + async fn multiple_assets_coins() { // Setup coins let (owner, asset_ids, base_asset_id, db) = setup_coins(); - multiple_assets_assert(owner, &asset_ids, base_asset_id, db); + multiple_assets_assert(owner, &asset_ids, base_asset_id, db).await; } - #[test] - fn multiple_assets_coins_and_messages() { + #[tokio::test] + async fn
multiple_assets_coins_and_messages() { // Setup coins and messages let (owner, asset_ids, base_asset_id, db) = setup_coins_and_messages(); - multiple_assets_assert(owner, &asset_ids, base_asset_id, db); + multiple_assets_assert(owner, &asset_ids, base_asset_id, db).await; } } @@ -678,7 +702,41 @@ mod tests { use super::*; use fuel_core_types::entities::coins::CoinId; - fn exclusion_assert( + async fn query( + db: &ServiceDatabase, + owner: Address, + base_asset_id: AssetId, + asset_ids: &[AssetId], + query_per_asset: Vec<AssetSpendTarget>, + excluded_ids: Vec<CoinId>, + ) -> Result<Vec<(AssetId, u64)>, CoinsQueryError> { + let spend_query = SpendQuery::new( + owner, + &query_per_asset, + Some(excluded_ids), + base_asset_id, + )?; + let coins = random_improve(&db.test_view(), &spend_query).await; + + // Transform result for convenience + coins.map(|coins| { + coins + .into_iter() + .flat_map(|coin| { + coin.into_iter() + .map(|coin| (*coin.asset_id(&base_asset_id), coin.amount())) + .sorted_by_key(|(asset_id, amount)| { + ( + asset_ids.iter().position(|c| c == asset_id).unwrap(), + Reverse(*amount), + ) + }) + }) + .collect() + }) + } + + async fn exclusion_assert( owner: Address, asset_ids: &[AssetId], base_asset_id: AssetId, @@ -687,47 +745,17 @@ mod tests { ) { let asset_id = asset_ids[0]; - let query = |query_per_asset: Vec<AssetSpendTarget>, - excluded_ids: Vec<CoinId>| - -> Result<Vec<(AssetId, u64)>, CoinsQueryError> { - let spend_query = SpendQuery::new( - owner, - &query_per_asset, - Some(excluded_ids), - base_asset_id, - )?; - let coins = - random_improve(&db.service_database().test_view(), &spend_query); - - // Transform result for convenience - coins.map(|coins| { - coins - .into_iter() - .flat_map(|coin| { - coin.into_iter() - .map(|coin| { - (*coin.asset_id(&base_asset_id), coin.amount()) - }) - .sorted_by_key(|(asset_id, amount)| { - ( - asset_ids - .iter() - .position(|c| c == asset_id) - .unwrap(), - Reverse(*amount), - ) - }) - }) - .collect() - }) - }; - // Query some amounts, including higher than the owner's balance for amount in 0..20 { let coins = query( - vec![AssetSpendTarget::new(asset_id, amount, usize::MAX)], + &db.service_database(), + owner, + base_asset_id, + asset_ids, + vec![AssetSpendTarget::new(asset_id, amount, u16::MAX)], excluded_ids.clone(), - ); + ) + .await; // Transform result for convenience let coins = coins.map(|coins| { @@ -769,52 +797,56 @@ mod tests { } } - #[test] - fn exclusion_coins() { + #[tokio::test] + async fn exclusion_coins() { // Setup coins let (owner, asset_ids, base_asset_id, db) = setup_coins(); // Exclude largest coin IDs let excluded_ids = db .owned_coins(&owner) + .await .into_iter() .filter(|coin| coin.amount == 5) .map(|coin| CoinId::Utxo(coin.utxo_id)) .collect_vec(); - exclusion_assert(owner, &asset_ids, base_asset_id, db, excluded_ids); + exclusion_assert(owner, &asset_ids, base_asset_id, db, excluded_ids).await; } - #[test] - fn exclusion_messages() { + #[tokio::test] + async fn exclusion_messages() { // Setup messages let (owner, base_asset_id, db) = setup_messages(); // Exclude largest message IDs let excluded_ids = db .owned_messages(&owner) + .await .into_iter() .filter(|message| message.amount() == 5) .map(|message| CoinId::Message(*message.id())) .collect_vec(); - exclusion_assert(owner, &[base_asset_id], base_asset_id, db, excluded_ids); + exclusion_assert(owner, &[base_asset_id], base_asset_id, db, excluded_ids) + .await; } - #[test] - fn exclusion_coins_and_messages() { + #[tokio::test] + async fn exclusion_coins_and_messages() { // Setup coins and messages let (owner, asset_ids, base_asset_id, db) =
setup_coins_and_messages(); // Exclude largest message IDs, because coins only have amounts 1 and 2 let excluded_ids = db .owned_messages(&owner) + .await .into_iter() .filter(|message| message.amount() == 5) .map(|message| CoinId::Message(*message.id())) .collect_vec(); - exclusion_assert(owner, &asset_ids, base_asset_id, db, excluded_ids); + exclusion_assert(owner, &asset_ids, base_asset_id, db, excluded_ids).await; } } @@ -822,7 +854,7 @@ struct TestCase { db_amount: Vec<u64>, target_amount: u64, - max_coins: usize, + max_coins: u16, } pub enum CoinType { Message, } - fn test_case_run( + async fn test_case_run( case: TestCase, coin_type: CoinType, base_asset_id: AssetId, @@ -866,23 +898,26 @@ None, base_asset_id, )?, - )?; + ) + .await?; assert_eq!(coins.len(), 1); Ok(coins[0].len()) } - #[test] - fn insufficient_coins_returns_error() { + #[tokio::test] + async fn insufficient_coins_returns_error() { let test_case = TestCase { db_amount: vec![0], target_amount: u64::MAX, - max_coins: usize::MAX, + max_coins: u16::MAX, }; let mut rng = StdRng::seed_from_u64(0xF00DF00D); let base_asset_id = rng.gen(); - let coin_result = test_case_run(test_case.clone(), CoinType::Coin, base_asset_id); - let message_result = test_case_run(test_case, CoinType::Message, base_asset_id); + let coin_result = + test_case_run(test_case.clone(), CoinType::Coin, base_asset_id).await; + let message_result = + test_case_run(test_case, CoinType::Message, base_asset_id).await; assert_eq!(coin_result, message_result); assert_matches!( coin_result, @@ -897,7 +932,7 @@ TestCase { db_amount: vec![u64::MAX, u64::MAX], target_amount: u64::MAX, - max_coins: usize::MAX, + max_coins: u16::MAX, } => Ok(1) ; "Enough coins in the DB to reach target(u64::MAX) by 1 coin" @@ -920,11 +955,13 @@ => Err(CoinsQueryError::MaxCoinsReached) ; "Enough coins in the DB to reach target(u64::MAX) but limit is zero" )] - fn corner_cases(case: TestCase) -> Result<usize, CoinsQueryError> { + #[tokio::test] + async fn corner_cases(case: TestCase) -> Result<usize, CoinsQueryError> { let mut rng = StdRng::seed_from_u64(0xF00DF00D); let base_asset_id = rng.gen(); - let coin_result = test_case_run(case.clone(), CoinType::Coin, base_asset_id); - let message_result = test_case_run(case, CoinType::Message, base_asset_id); + let coin_result = + test_case_run(case.clone(), CoinType::Coin, base_asset_id).await; + let message_result = test_case_run(case, CoinType::Message, base_asset_id).await; assert_eq!(coin_result, message_result); coin_result } @@ -948,7 +985,7 @@ fn service_database(&self) -> ServiceDatabase { let on_chain = self.database.on_chain().clone(); let off_chain = self.database.off_chain().clone(); - ServiceDatabase::new(0u32.into(), on_chain, off_chain) + ServiceDatabase::new(100, 0u32.into(), on_chain, off_chain) } } @@ -1001,25 +1038,23 @@ message } - pub fn owned_coins(&self, owner: &Address) -> Vec<Coin> { - use crate::query::CoinQueryData; + pub async fn owned_coins(&self, owner: &Address) -> Vec<Coin> { let query = self.service_database(); let query = query.test_view(); query - .owned_coins_ids(owner, None, IterDirection::Forward) - .map(|res| res.map(|id| query.coin(id).unwrap())) + .owned_coins(owner, None, IterDirection::Forward) .try_collect() + .await .unwrap() } - pub fn owned_messages(&self, owner: &Address) -> Vec<Message> { - use crate::query::MessageQueryData; + pub async fn owned_messages(&self, owner: &Address) -> Vec<Message> { let query = self.service_database(); let query = query.test_view(); query -
.owned_message_ids(owner, None, IterDirection::Forward) - .map(|res| res.map(|id| query.message(&id).unwrap())) + .owned_messages(owner, None, IterDirection::Forward) .try_collect() + .await .unwrap() } } diff --git a/crates/fuel-core/src/database/block.rs b/crates/fuel-core/src/database/block.rs index c6295e62017..374a76a9663 100644 --- a/crates/fuel-core/src/database/block.rs +++ b/crates/fuel-core/src/database/block.rs @@ -67,7 +67,8 @@ impl OnChainIterableKeyValueView { let db_block = self.storage::().get(height)?; if let Some(block) = db_block { // fetch all the transactions - // TODO: optimize with multi-key get + // TODO: Use multiget when it's implemented. + // https://github.com/FuelLabs/fuel-core/issues/2344 let txs = block .transactions() .iter() diff --git a/crates/fuel-core/src/graphql_api.rs b/crates/fuel-core/src/graphql_api.rs index 6d0d280fd3f..772bbc815ea 100644 --- a/crates/fuel-core/src/graphql_api.rs +++ b/crates/fuel-core/src/graphql_api.rs @@ -4,6 +4,7 @@ use fuel_core_storage::{ }; use std::{ net::SocketAddr, + sync::OnceLock, time::Duration, }; @@ -17,9 +18,22 @@ pub(crate) mod validation_extension; pub(crate) mod view_extension; pub mod worker_service; +#[derive(Clone, Debug)] +pub struct Config { + pub config: ServiceConfig, + pub utxo_validation: bool, + pub debug: bool, + pub vm_backtrace: bool, + pub max_tx: usize, + pub max_txpool_dependency_chain_length: usize, + pub chain_name: String, +} + #[derive(Clone, Debug)] pub struct ServiceConfig { pub addr: SocketAddr, + pub number_of_threads: usize, + pub database_batch_size: usize, pub max_queries_depth: usize, pub max_queries_complexity: usize, pub max_queries_recursive_depth: usize, @@ -30,8 +44,11 @@ pub struct ServiceConfig { /// Time to wait after submitting a query before debug info will be logged about query. pub query_log_threshold_time: Duration, pub api_request_timeout: Duration, + /// Configurable cost parameters to limit the complexity of GraphQL queries. + pub costs: Costs, } +#[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct Costs { pub balance_query: usize, pub coins_to_spend: usize, @@ -54,16 +71,19 @@ pub struct Costs { pub da_compressed_block_read: usize, } -pub const QUERY_COSTS: Costs = Costs { - // balance_query: 4000, +#[cfg(feature = "test-helpers")] +impl Default for Costs { + fn default() -> Self { + DEFAULT_QUERY_COSTS + } +} + +pub const DEFAULT_QUERY_COSTS: Costs = Costs { balance_query: 40001, coins_to_spend: 40001, - // get_peers: 2000, get_peers: 40001, - // estimate_predicates: 3000, estimate_predicates: 40001, dry_run: 12000, - // submit: 5000, submit: 40001, submit_and_await: 40001, status_change: 40001, @@ -80,15 +100,24 @@ pub const QUERY_COSTS: Costs = Costs { da_compressed_block_read: 4000, }; -#[derive(Clone, Debug)] -pub struct Config { - pub config: ServiceConfig, - pub utxo_validation: bool, - pub debug: bool, - pub vm_backtrace: bool, - pub max_tx: usize, - pub max_txpool_dependency_chain_length: usize, - pub chain_name: String, +pub fn query_costs() -> &'static Costs { + QUERY_COSTS.get().unwrap_or(&DEFAULT_QUERY_COSTS) +} + +pub static QUERY_COSTS: OnceLock<Costs> = OnceLock::new(); + +fn initialize_query_costs(costs: Costs) -> anyhow::Result<()> { + #[cfg(feature = "test-helpers")] + if costs != DEFAULT_QUERY_COSTS { + // We don't support setting these values in test contexts, because + // it can lead to unexpected behavior if multiple tests try to + // initialize different values.
+ anyhow::bail!("cannot initialize queries with non-default costs in tests") + } + + QUERY_COSTS.get_or_init(|| costs); + + Ok(()) } pub trait IntoApiResult { diff --git a/crates/fuel-core/src/graphql_api/api_service.rs b/crates/fuel-core/src/graphql_api/api_service.rs index ec79918e09c..28a714f8b0e 100644 --- a/crates/fuel-core/src/graphql_api/api_service.rs +++ b/crates/fuel-core/src/graphql_api/api_service.rs @@ -15,6 +15,7 @@ use crate::{ view_extension::ViewExtension, Config, }, + graphql_api, schema::{ CoreSchema, CoreSchemaBuilder, @@ -58,11 +59,8 @@ use axum::{ Json, Router, }; -use fuel_core_metrics::futures::{ - metered_future::MeteredFuture, - FuturesMetrics, -}; use fuel_core_services::{ + AsyncProcessor, RunnableService, RunnableTask, StateWatcher, @@ -79,6 +77,7 @@ use std::{ TcpListener, }, pin::Pin, + sync::Arc, }; use tokio_stream::StreamExt; use tower::limit::ConcurrencyLimitLayer; @@ -115,6 +114,7 @@ pub struct GraphqlService { pub struct ServerParams { router: Router, listener: TcpListener, + number_of_threads: usize, } pub struct Task { @@ -124,7 +124,7 @@ pub struct Task { #[derive(Clone)] struct ExecutorWithMetrics { - metric: FuturesMetrics, + processor: Arc<AsyncProcessor>, } impl<F> Executor<F> for ExecutorWithMetrics where F: Future + Send + 'static, F::Output: Send + 'static, { fn execute(&self, fut: F) { - let future = MeteredFuture::new(fut, self.metric.clone()); + let result = self.processor.try_spawn(fut); - tokio::task::spawn(future); + if let Err(err) = result { + tracing::error!("Failed to spawn a task for GraphQL: {:?}", err); + } } } @@ -159,14 +161,25 @@ impl RunnableService for GraphqlService { params: Self::TaskParams, ) -> anyhow::Result<Self::Task> { let mut state = state.clone(); - let ServerParams { router, listener } = params; - let metric = ExecutorWithMetrics { - metric: FuturesMetrics::obtain_futures_metrics("GraphQLFutures"), + let ServerParams { + router, + listener, + number_of_threads, + } = params; + + let processor = AsyncProcessor::new( + "GraphQLFutures", + number_of_threads, + tokio::sync::Semaphore::MAX_PERMITS, + )?; + + let executor = ExecutorWithMetrics { + processor: Arc::new(processor), }; let server = axum::Server::from_tcp(listener) .unwrap() - .executor(metric) + .executor(executor) .serve(router.into_make_service()) .with_graceful_shutdown(async move { state @@ -220,14 +233,21 @@ where OnChain::LatestView: OnChainDatabase, OffChain::LatestView: OffChainDatabase, { + graphql_api::initialize_query_costs(config.config.costs.clone())?; + let network_addr = config.config.addr; - let combined_read_database = - ReadDatabase::new(genesis_block_height, on_database, off_database); + let combined_read_database = ReadDatabase::new( + config.config.database_batch_size, + genesis_block_height, + on_database, + off_database, + ); let request_timeout = config.config.api_request_timeout; let concurrency_limit = config.config.max_concurrent_queries; let body_limit = config.config.request_body_bytes_limit; let max_queries_resolver_recursive_depth = config.config.max_queries_resolver_recursive_depth; + let number_of_threads = config.config.number_of_threads; let schema = schema .limit_complexity(config.config.max_queries_complexity) @@ -292,7 +312,11 @@ where Ok(Service::new_with_params( GraphqlService { bound_address }, - ServerParams { router, listener }, + ServerParams { + router, + listener, + number_of_threads, + }, )) } diff --git a/crates/fuel-core/src/graphql_api/database.rs b/crates/fuel-core/src/graphql_api/database.rs index 418e14c1318..bf47c8d92a7 100644 ---
a/crates/fuel-core/src/graphql_api/database.rs +++ b/crates/fuel-core/src/graphql_api/database.rs @@ -1,16 +1,11 @@ use crate::fuel_core_graphql_api::{ database::arc_wrapper::ArcWrapper, ports::{ - DatabaseBlocks, - DatabaseChain, - DatabaseContracts, - DatabaseMessageProof, - DatabaseMessages, - DatabaseRelayedTransactions, OffChainDatabase, OnChainDatabase, }, }; +use fuel_core_services::yield_stream::StreamYieldExt; use fuel_core_storage::{ iter::{ BoxedIter, @@ -67,13 +62,12 @@ use fuel_core_types::{ txpool::TransactionStatus, }, }; +use futures::Stream; use std::{ borrow::Cow, sync::Arc, }; -use super::ports::DatabaseDaCompressedBlocks; - mod arc_wrapper; /// The on-chain view of the database used by the [`ReadView`] to fetch on-chain data. @@ -84,6 +78,8 @@ pub type OffChainView = Arc; /// The container of the on-chain and off-chain database view provides. /// It is used only by `ViewExtension` to create a [`ReadView`]. pub struct ReadDatabase { + /// The size of the batch during fetching from the database. + batch_size: usize, /// The height of the genesis block. genesis_height: BlockHeight, /// The on-chain database view provider. @@ -95,6 +91,7 @@ pub struct ReadDatabase { impl ReadDatabase { /// Creates a new [`ReadDatabase`] with the given on-chain and off-chain database view providers. pub fn new( + batch_size: usize, genesis_height: BlockHeight, on_chain: OnChain, off_chain: OffChain, @@ -106,6 +103,7 @@ impl ReadDatabase { OffChain::LatestView: OffChainDatabase, { Self { + batch_size, genesis_height, on_chain: Box::new(ArcWrapper::new(on_chain)), off_chain: Box::new(ArcWrapper::new(off_chain)), @@ -118,6 +116,7 @@ impl ReadDatabase { // It is not possible to implement until `view_at` is implemented for the `AtomicView`. // https://github.com/FuelLabs/fuel-core/issues/1582 Ok(ReadView { + batch_size: self.batch_size, genesis_height: self.genesis_height, on_chain: self.on_chain.latest_view()?, off_chain: self.off_chain.latest_view()?, @@ -132,16 +131,17 @@ impl ReadDatabase { #[derive(Clone)] pub struct ReadView { - genesis_height: BlockHeight, - on_chain: OnChainView, - off_chain: OffChainView, + pub(crate) batch_size: usize, + pub(crate) genesis_height: BlockHeight, + pub(crate) on_chain: OnChainView, + pub(crate) off_chain: OffChainView, } -impl DatabaseBlocks for ReadView { - fn transaction(&self, tx_id: &TxId) -> StorageResult { +impl ReadView { + pub fn transaction(&self, tx_id: &TxId) -> StorageResult { let result = self.on_chain.transaction(tx_id); if result.is_not_found() { - if let Some(tx) = self.old_transaction(tx_id)? { + if let Some(tx) = self.off_chain.old_transaction(tx_id)? { Ok(tx) } else { Err(not_found!(Transactions)) @@ -151,7 +151,22 @@ impl DatabaseBlocks for ReadView { } } - fn block(&self, height: &BlockHeight) -> StorageResult { + pub async fn transactions( + &self, + tx_ids: Vec, + ) -> Vec> { + // TODO: Use multiget when it's implemented. + // https://github.com/FuelLabs/fuel-core/issues/2344 + let result = tx_ids + .iter() + .map(|tx_id| self.transaction(tx_id)) + .collect::>(); + // Give a chance to other tasks to run. 
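+        // (`yield_now` re-queues the current task at the back of the tokio
+        // scheduler, so a long run of lookups cannot starve other queries.)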
+ tokio::task::yield_now().await; + result + } + + pub fn block(&self, height: &BlockHeight) -> StorageResult { if *height >= self.genesis_height { self.on_chain.block(height) } else { @@ -159,7 +174,7 @@ impl DatabaseBlocks for ReadView { } } - fn blocks( + pub fn blocks( &self, height: Option, direction: IterDirection, @@ -202,11 +217,11 @@ impl DatabaseBlocks for ReadView { } } - fn latest_height(&self) -> StorageResult { + pub fn latest_height(&self) -> StorageResult { self.on_chain.latest_height() } - fn consensus(&self, id: &BlockHeight) -> StorageResult { + pub fn consensus(&self, id: &BlockHeight) -> StorageResult { if *id >= self.genesis_height { self.on_chain.consensus(id) } else { @@ -215,45 +230,34 @@ impl DatabaseBlocks for ReadView { } } -impl DatabaseDaCompressedBlocks for ReadView { - fn da_compressed_block(&self, id: &BlockHeight) -> StorageResult> { - self.off_chain.da_compressed_block(id) - } - - fn latest_height(&self) -> StorageResult { - self.on_chain.latest_height() - } -} - -impl StorageInspect for ReadView -where - M: Mappable, - dyn OnChainDatabase: StorageInspect, -{ +impl StorageInspect for ReadView { type Error = StorageError; - fn get(&self, key: &M::Key) -> StorageResult>> { - self.on_chain.get(key) + fn get( + &self, + key: &::Key, + ) -> StorageResult::OwnedValue>>> { + StorageInspect::::get(self.on_chain.as_ref(), key) } - fn contains_key(&self, key: &M::Key) -> StorageResult { - self.on_chain.contains_key(key) + fn contains_key(&self, key: &::Key) -> StorageResult { + StorageInspect::::contains_key(self.on_chain.as_ref(), key) } } impl StorageSize for ReadView { fn size_of_value(&self, key: &BlobId) -> Result, Self::Error> { - self.on_chain.size_of_value(key) + StorageSize::::size_of_value(self.on_chain.as_ref(), key) } } impl StorageRead for ReadView { fn read(&self, key: &BlobId, buf: &mut [u8]) -> Result, Self::Error> { - self.on_chain.read(key, buf) + StorageRead::::read(self.on_chain.as_ref(), key, buf) } fn read_alloc(&self, key: &BlobId) -> Result>, Self::Error> { - self.on_chain.read_alloc(key) + StorageRead::::read_alloc(self.on_chain.as_ref(), key) } } @@ -263,50 +267,46 @@ impl PredicateStorageRequirements for ReadView { } } -impl DatabaseMessages for ReadView { - fn all_messages( +impl ReadView { + pub fn all_messages( &self, start_message_id: Option, direction: IterDirection, - ) -> BoxedIter<'_, StorageResult> { - self.on_chain.all_messages(start_message_id, direction) + ) -> impl Stream> + '_ { + futures::stream::iter(self.on_chain.all_messages(start_message_id, direction)) + .yield_each(self.batch_size) } - fn message_exists(&self, nonce: &Nonce) -> StorageResult { + pub fn message_exists(&self, nonce: &Nonce) -> StorageResult { self.on_chain.message_exists(nonce) } -} -impl DatabaseRelayedTransactions for ReadView { - fn transaction_status( + pub fn relayed_transaction_status( &self, id: Bytes32, ) -> StorageResult> { - let maybe_status = self.off_chain.relayed_tx_status(id)?; - Ok(maybe_status) + self.off_chain.relayed_tx_status(id) } -} -impl DatabaseContracts for ReadView { - fn contract_balances( + pub fn contract_balances( &self, contract: ContractId, start_asset: Option, direction: IterDirection, - ) -> BoxedIter> { - self.on_chain - .contract_balances(contract, start_asset, direction) + ) -> impl Stream> + '_ { + futures::stream::iter(self.on_chain.contract_balances( + contract, + start_asset, + direction, + )) + .yield_each(self.batch_size) } -} -impl DatabaseChain for ReadView { - fn da_height(&self) -> StorageResult { + pub fn 
da_height(&self) -> StorageResult { self.on_chain.da_height() } -} -impl DatabaseMessageProof for ReadView { - fn block_history_proof( + pub fn block_history_proof( &self, message_block_height: &BlockHeight, commit_block_height: &BlockHeight, @@ -316,85 +316,67 @@ impl DatabaseMessageProof for ReadView { } } -impl OnChainDatabase for ReadView {} - -impl OffChainDatabase for ReadView { - fn block_height(&self, block_id: &BlockId) -> StorageResult { +impl ReadView { + pub fn block_height(&self, block_id: &BlockId) -> StorageResult { self.off_chain.block_height(block_id) } - fn da_compressed_block(&self, height: &BlockHeight) -> StorageResult> { + pub fn da_compressed_block(&self, height: &BlockHeight) -> StorageResult> { self.off_chain.da_compressed_block(height) } - fn tx_status(&self, tx_id: &TxId) -> StorageResult { + pub fn tx_status(&self, tx_id: &TxId) -> StorageResult { self.off_chain.tx_status(tx_id) } - fn owned_coins_ids( + pub fn owned_coins_ids( &self, owner: &Address, start_coin: Option, direction: IterDirection, - ) -> BoxedIter<'_, StorageResult> { - self.off_chain.owned_coins_ids(owner, start_coin, direction) + ) -> impl Stream> + '_ { + let iter = self.off_chain.owned_coins_ids(owner, start_coin, direction); + + futures::stream::iter(iter) } - fn owned_message_ids( + pub fn owned_message_ids( &self, owner: &Address, start_message_id: Option, direction: IterDirection, - ) -> BoxedIter<'_, StorageResult> { - self.off_chain - .owned_message_ids(owner, start_message_id, direction) + ) -> impl Stream> + '_ { + futures::stream::iter(self.off_chain.owned_message_ids( + owner, + start_message_id, + direction, + )) } - fn owned_transactions_ids( + pub fn owned_transactions_ids( &self, owner: Address, start: Option, direction: IterDirection, - ) -> BoxedIter> { - self.off_chain - .owned_transactions_ids(owner, start, direction) + ) -> impl Stream> + '_ { + futures::stream::iter( + self.off_chain + .owned_transactions_ids(owner, start, direction), + ) } - fn contract_salt(&self, contract_id: &ContractId) -> StorageResult { + pub fn contract_salt(&self, contract_id: &ContractId) -> StorageResult { self.off_chain.contract_salt(contract_id) } - fn old_block(&self, height: &BlockHeight) -> StorageResult { - self.off_chain.old_block(height) - } - - fn old_blocks( - &self, - height: Option, - direction: IterDirection, - ) -> BoxedIter<'_, StorageResult> { - self.off_chain.old_blocks(height, direction) - } - - fn old_block_consensus(&self, height: &BlockHeight) -> StorageResult { - self.off_chain.old_block_consensus(height) - } - - fn old_transaction( - &self, - id: &TxId, - ) -> StorageResult> { - self.off_chain.old_transaction(id) - } - - fn relayed_tx_status( + pub fn relayed_tx_status( &self, id: Bytes32, ) -> StorageResult> { self.off_chain.relayed_tx_status(id) } - fn message_is_spent(&self, nonce: &Nonce) -> StorageResult { + pub fn message_is_spent(&self, nonce: &Nonce) -> StorageResult { self.off_chain.message_is_spent(nonce) } } diff --git a/crates/fuel-core/src/graphql_api/metrics_extension.rs b/crates/fuel-core/src/graphql_api/metrics_extension.rs index c8659701d65..9f89c693b6b 100644 --- a/crates/fuel-core/src/graphql_api/metrics_extension.rs +++ b/crates/fuel-core/src/graphql_api/metrics_extension.rs @@ -6,11 +6,14 @@ use async_graphql::{ NextParseQuery, NextRequest, NextResolve, + NextValidation, ResolveInfo, }, parser::types::ExecutableDocument, Response, + ServerError, ServerResult, + ValidationResult, Value, Variables, }; @@ -118,4 +121,14 @@ impl Extension for 
MetricsExtInner { res } + + async fn validation( + &self, + ctx: &ExtensionContext<'_>, + next: NextValidation<'_>, + ) -> Result> { + let result = next.run(ctx).await?; + graphql_metrics().graphql_complexity_observe(result.complexity as f64); + Ok(result) + } } diff --git a/crates/fuel-core/src/query.rs b/crates/fuel-core/src/query.rs index fc2dc79ea9b..0dc744bcec0 100644 --- a/crates/fuel-core/src/query.rs +++ b/crates/fuel-core/src/query.rs @@ -1,7 +1,6 @@ mod balance; mod blob; mod block; -mod chain; mod coin; mod contract; mod message; @@ -13,12 +12,5 @@ pub mod da_compressed; // TODO: Remove reexporting of everything pub use balance::*; -pub use blob::*; -pub use block::*; -pub use chain::*; -pub use coin::*; -pub use contract::*; pub use message::*; pub(crate) use subscriptions::*; -pub use tx::*; -pub use upgrades::*; diff --git a/crates/fuel-core/src/query/balance.rs b/crates/fuel-core/src/query/balance.rs index ecbc47620bd..161fd64b87e 100644 --- a/crates/fuel-core/src/query/balance.rs +++ b/crates/fuel-core/src/query/balance.rs @@ -4,12 +4,9 @@ use asset_query::{ AssetSpendTarget, AssetsQuery, }; +use fuel_core_services::yield_stream::StreamYieldExt; use fuel_core_storage::{ - iter::{ - BoxedIter, - IntoBoxedIter, - IterDirection, - }, + iter::IterDirection, Result as StorageResult, }; use fuel_core_types::{ @@ -19,7 +16,12 @@ use fuel_core_types::{ }, services::graphql_api::AddressBalance, }; -use itertools::Itertools; +use futures::{ + FutureExt, + Stream, + StreamExt, + TryStreamExt, +}; use std::{ cmp::Ordering, collections::HashMap, @@ -27,24 +29,8 @@ use std::{ pub mod asset_query; -pub trait BalanceQueryData: Send + Sync { - fn balance( - &self, - owner: Address, - asset_id: AssetId, - base_asset_id: AssetId, - ) -> StorageResult; - - fn balances( - &self, - owner: Address, - direction: IterDirection, - base_asset_id: AssetId, - ) -> BoxedIter>; -} - -impl BalanceQueryData for ReadView { - fn balance( +impl ReadView { + pub async fn balance( &self, owner: Address, asset_id: AssetId, @@ -52,21 +38,20 @@ impl BalanceQueryData for ReadView { ) -> StorageResult { let amount = AssetQuery::new( &owner, - &AssetSpendTarget::new(asset_id, u64::MAX, usize::MAX), + &AssetSpendTarget::new(asset_id, u64::MAX, u16::MAX), &base_asset_id, None, self, ) .coins() .map(|res| res.map(|coins| coins.amount())) - .try_fold(0u64, |mut balance, res| -> StorageResult<_> { - let amount = res?; - - // Increase the balance - balance = balance.saturating_add(amount); - - Ok(balance) - })?; + .try_fold(0u64, |balance, amount| { + async move { + // Increase the balance + Ok(balance.saturating_add(amount)) + } + }) + .await?; Ok(AddressBalance { owner, @@ -75,54 +60,53 @@ impl BalanceQueryData for ReadView { }) } - fn balances( - &self, - owner: Address, + pub fn balances<'a>( + &'a self, + owner: &'a Address, direction: IterDirection, - base_asset_id: AssetId, - ) -> BoxedIter> { - let mut amounts_per_asset = HashMap::new(); - let mut errors = vec![]; + base_asset_id: &'a AssetId, + ) -> impl Stream> + 'a { + let query = AssetsQuery::new(owner, None, None, self, base_asset_id); + let stream = query.coins(); - for coin in AssetsQuery::new(&owner, None, None, self, &base_asset_id).coins() { - match coin { - Ok(coin) => { + stream + .try_fold( + HashMap::new(), + move |mut amounts_per_asset, coin| async move { let amount: &mut u64 = amounts_per_asset - .entry(*coin.asset_id(&base_asset_id)) + .entry(*coin.asset_id(base_asset_id)) .or_default(); *amount = amount.saturating_add(coin.amount()); - } - 
Err(err) => { - errors.push(err); - } - } - } + Ok(amounts_per_asset) + }, + ) + .into_stream() + .try_filter_map(move |amounts_per_asset| async move { + let mut balances = amounts_per_asset + .into_iter() + .map(|(asset_id, amount)| AddressBalance { + owner: *owner, + amount, + asset_id, + }) + .collect::>(); - let mut balances = amounts_per_asset - .into_iter() - .map(|(asset_id, amount)| AddressBalance { - owner, - amount, - asset_id, - }) - .collect_vec(); + balances.sort_by(|l, r| { + if l.asset_id < r.asset_id { + Ordering::Less + } else { + Ordering::Greater + } + }); - balances.sort_by(|l, r| { - if l.asset_id < r.asset_id { - Ordering::Less - } else { - Ordering::Greater - } - }); - - if direction == IterDirection::Reverse { - balances.reverse(); - } + if direction == IterDirection::Reverse { + balances.reverse(); + } - balances - .into_iter() - .map(Ok) - .chain(errors.into_iter().map(Err)) - .into_boxed() + Ok(Some(futures::stream::iter(balances))) + }) + .map_ok(|stream| stream.map(Ok)) + .try_flatten() + .yield_each(self.batch_size) } } diff --git a/crates/fuel-core/src/query/balance/asset_query.rs b/crates/fuel-core/src/query/balance/asset_query.rs index 57225b07930..13a289ec1e4 100644 --- a/crates/fuel-core/src/query/balance/asset_query.rs +++ b/crates/fuel-core/src/query/balance/asset_query.rs @@ -1,10 +1,5 @@ -use crate::{ - graphql_api::database::ReadView, - query::{ - CoinQueryData, - MessageQueryData, - }, -}; +use crate::graphql_api::database::ReadView; +use fuel_core_services::stream::IntoBoxStream; use fuel_core_storage::{ iter::IterDirection, Error as StorageError, @@ -20,19 +15,23 @@ use fuel_core_types::{ AssetId, }, }; -use itertools::Itertools; +use futures::{ + Stream, + TryStreamExt, +}; use std::collections::HashSet; +use tokio_stream::StreamExt; /// At least required `target` of the query per asset's `id` with `max` coins. 
#[derive(Clone)] pub struct AssetSpendTarget { pub id: AssetId, pub target: u64, - pub max: usize, + pub max: u16, } impl AssetSpendTarget { - pub fn new(id: AssetId, target: u64, max: usize) -> Self { + pub fn new(id: AssetId, target: u64, max: u16) -> Self { Self { id, target, max } } } @@ -54,9 +53,10 @@ impl Exclude { } } +#[derive(Clone)] pub struct AssetsQuery<'a> { pub owner: &'a Address, - pub assets: Option>, + pub allowed_assets: Option>, pub exclude: Option<&'a Exclude>, pub database: &'a ReadView, pub base_asset_id: &'a AssetId, @@ -65,27 +65,34 @@ pub struct AssetsQuery<'a> { impl<'a> AssetsQuery<'a> { pub fn new( owner: &'a Address, - assets: Option>, + allowed_assets: Option>, exclude: Option<&'a Exclude>, database: &'a ReadView, base_asset_id: &'a AssetId, ) -> Self { Self { owner, - assets, + allowed_assets, exclude, database, base_asset_id, } } - fn coins_iter(&self) -> impl Iterator> + '_ { - self.database + fn coins_iter(mut self) -> impl Stream> + 'a { + let allowed_assets = self.allowed_assets.take(); + let database = self.database; + let stream = self + .database .owned_coins_ids(self.owner, None, IterDirection::Forward) .map(|id| id.map(CoinId::from)) - .filter_ok(|id| { - if let Some(exclude) = self.exclude { - !exclude.coin_ids.contains(id) + .filter(move |result| { + if let Ok(id) = result { + if let Some(exclude) = self.exclude { + !exclude.coin_ids.contains(id) + } else { + true + } } else { true } @@ -97,27 +104,48 @@ impl<'a> AssetsQuery<'a> { } else { return Err(anyhow::anyhow!("The coin is not UTXO").into()); }; - let coin = self.database.coin(id)?; - - Ok(CoinType::Coin(coin)) + Ok(id) }) + }); + + futures::stream::StreamExt::chunks(stream, database.batch_size) + .map(|chunk| { + use itertools::Itertools; + + let chunk = chunk.into_iter().try_collect::<_, Vec<_>, _>()?; + Ok::<_, StorageError>(chunk) + }) + .try_filter_map(move |chunk| async move { + let chunk = database + .coins(chunk) + .await + .map(|result| result.map(CoinType::Coin)); + Ok(Some(futures::stream::iter(chunk))) }) - .filter_ok(|coin| { - if let CoinType::Coin(coin) = coin { - self.has_asset(&coin.asset_id) + .try_flatten() + .filter(move |result| { + if let Ok(CoinType::Coin(coin)) = result { + allowed_asset(&allowed_assets, &coin.asset_id) } else { true } }) } - fn messages_iter(&self) -> impl Iterator> + '_ { - self.database + fn messages_iter(&self) -> impl Stream> + 'a { + let exclude = self.exclude; + let database = self.database; + let stream = self + .database .owned_message_ids(self.owner, None, IterDirection::Forward) .map(|id| id.map(CoinId::from)) - .filter_ok(|id| { - if let Some(exclude) = self.exclude { - !exclude.coin_ids.contains(id) + .filter(move |result| { + if let Ok(id) = result { + if let Some(e) = exclude { + !e.coin_ids.contains(id) + } else { + true + } } else { true } @@ -129,11 +157,29 @@ impl<'a> AssetsQuery<'a> { } else { return Err(anyhow::anyhow!("The coin is not a message").into()); }; - let message = self.database.message(&id)?; - Ok(message) + Ok(id) }) + }); + + futures::stream::StreamExt::chunks(stream, database.batch_size) + .map(|chunk| { + use itertools::Itertools; + + let chunk = chunk.into_iter().try_collect::<_, Vec<_>, _>()?; + Ok(chunk) + }) + .try_filter_map(move |chunk| async move { + let chunk = database.messages(chunk).await; + Ok::<_, StorageError>(Some(futures::stream::iter(chunk))) + }) + .try_flatten() + .filter(|result| { + if let Ok(message) = result { + message.data().is_empty() + } else { + true + } }) - .filter_ok(|message| 
message.data().is_empty()) .map(|result| { result.map(|message| { CoinType::MessageCoin( @@ -145,28 +191,23 @@ impl<'a> AssetsQuery<'a> { }) } - fn has_asset(&self, asset_id: &AssetId) -> bool { - self.assets - .as_ref() - .map(|assets| assets.contains(asset_id)) - .unwrap_or(true) - } - /// Returns the iterator over all valid(spendable, allowed by `exclude`) coins of the `owner`. /// /// # Note: The coins of different type are not grouped by the `asset_id`. // TODO: Optimize this by creating an index // https://github.com/FuelLabs/fuel-core/issues/588 - pub fn coins(&self) -> impl Iterator> + '_ { - let has_base_asset = self.has_asset(self.base_asset_id); - let messages_iter = has_base_asset - .then(|| self.messages_iter()) - .into_iter() - .flatten(); - self.coins_iter().chain(messages_iter) + pub fn coins(self) -> impl Stream> + 'a { + let has_base_asset = allowed_asset(&self.allowed_assets, self.base_asset_id); + if has_base_asset { + let message_iter = self.messages_iter(); + self.coins_iter().chain(message_iter).into_boxed_ref() + } else { + self.coins_iter().into_boxed_ref() + } } } +#[derive(Clone)] pub struct AssetQuery<'a> { pub owner: &'a Address, pub asset: &'a AssetSpendTarget, @@ -202,7 +243,14 @@ impl<'a> AssetQuery<'a> { /// Returns the iterator over all valid(spendable, allowed by `exclude`) coins of the `owner` /// for the `asset_id`. - pub fn coins(&self) -> impl Iterator> + '_ { + pub fn coins(self) -> impl Stream> + 'a { self.query.coins() } } + +fn allowed_asset(allowed_assets: &Option>, asset_id: &AssetId) -> bool { + allowed_assets + .as_ref() + .map(|allowed_assets| allowed_assets.contains(asset_id)) + .unwrap_or(true) +} diff --git a/crates/fuel-core/src/query/blob.rs b/crates/fuel-core/src/query/blob.rs index a3987c4ff10..6c7f9e7de8d 100644 --- a/crates/fuel-core/src/query/blob.rs +++ b/crates/fuel-core/src/query/blob.rs @@ -1,7 +1,4 @@ -use crate::graphql_api::ports::{ - OffChainDatabase, - OnChainDatabase, -}; +use crate::fuel_core_graphql_api::database::ReadView; use fuel_core_storage::{ not_found, tables::BlobData, @@ -10,17 +7,12 @@ use fuel_core_storage::{ }; use fuel_core_types::fuel_tx::BlobId; -pub trait BlobQueryData: Send + Sync { - fn blob_exists(&self, id: BlobId) -> StorageResult; - fn blob_bytecode(&self, id: BlobId) -> StorageResult>; -} - -impl BlobQueryData for D { - fn blob_exists(&self, id: BlobId) -> StorageResult { +impl ReadView { + pub fn blob_exists(&self, id: BlobId) -> StorageResult { self.storage::().contains_key(&id) } - fn blob_bytecode(&self, id: BlobId) -> StorageResult> { + pub fn blob_bytecode(&self, id: BlobId) -> StorageResult> { let blob = self .storage::() .get(&id)? 
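The chunk-and-flatten idiom used in `asset_query.rs` above recurs in the coin, message, and transaction queries below: ids are grouped into `batch_size` chunks, each chunk is fetched as one unit, and the results are flattened back into a single fallible stream. A self-contained sketch of the idiom with toy types (assumes the `futures` and `tokio` crates; not the fuel-core code):

use futures::{stream, StreamExt, TryStreamExt};

// Stand-in for a batched database lookup over one chunk of ids.
async fn fetch_many(ids: Vec<u32>) -> Vec<Result<u32, String>> {
    // Yield between batches so other tasks can make progress.
    tokio::task::yield_now().await;
    ids.into_iter().map(|id| Ok(id * 10)).collect()
}

#[tokio::main]
async fn main() {
    let batch_size = 4;
    let ids = stream::iter((0u32..10).map(Ok::<u32, String>));

    let values: Vec<u32> = ids
        // Group the incoming ids into chunks of `batch_size`.
        .chunks(batch_size)
        // A chunk is `Vec<Result<_, _>>`; fail the whole chunk on the first error.
        .map(|chunk| chunk.into_iter().collect::<Result<Vec<_>, String>>())
        // Fetch each chunk as one unit, then flatten back into a single stream.
        .try_filter_map(|chunk| async move {
            Ok(Some(stream::iter(fetch_many(chunk).await)))
        })
        .try_flatten()
        .try_collect()
        .await
        .expect("toy input contains no errors");

    assert_eq!(values, (0u32..10).map(|id| id * 10).collect::<Vec<_>>());
}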
diff --git a/crates/fuel-core/src/query/block.rs b/crates/fuel-core/src/query/block.rs index 9c365a3ef26..f4b461639f0 100644 --- a/crates/fuel-core/src/query/block.rs +++ b/crates/fuel-core/src/query/block.rs @@ -1,67 +1,29 @@ -use crate::fuel_core_graphql_api::ports::DatabaseBlocks; +use crate::fuel_core_graphql_api::database::ReadView; +use fuel_core_services::yield_stream::StreamYieldExt; use fuel_core_storage::{ - iter::{ - BoxedIter, - IterDirection, - }, + iter::IterDirection, Result as StorageResult, }; use fuel_core_types::{ - blockchain::{ - block::CompressedBlock, - consensus::Consensus, - }, + blockchain::block::CompressedBlock, fuel_types::BlockHeight, }; +use futures::Stream; -pub trait SimpleBlockData: Send + Sync { - fn block(&self, id: &BlockHeight) -> StorageResult; -} - -impl SimpleBlockData for D -where - D: DatabaseBlocks + ?Sized + Send + Sync, -{ - fn block(&self, id: &BlockHeight) -> StorageResult { - self.block(id) - } -} - -pub trait BlockQueryData: Send + Sync + SimpleBlockData { - fn latest_block_height(&self) -> StorageResult; - - fn latest_block(&self) -> StorageResult; - - fn compressed_blocks( - &self, - height: Option, - direction: IterDirection, - ) -> BoxedIter>; - - fn consensus(&self, id: &BlockHeight) -> StorageResult; -} - -impl BlockQueryData for D -where - D: DatabaseBlocks + ?Sized + Send + Sync, -{ - fn latest_block_height(&self) -> StorageResult { +impl ReadView { + pub fn latest_block_height(&self) -> StorageResult { self.latest_height() } - fn latest_block(&self) -> StorageResult { + pub fn latest_block(&self) -> StorageResult { self.block(&self.latest_block_height()?) } - fn compressed_blocks( + pub fn compressed_blocks( &self, height: Option, direction: IterDirection, - ) -> BoxedIter> { - self.blocks(height, direction) - } - - fn consensus(&self, id: &BlockHeight) -> StorageResult { - self.consensus(id) + ) -> impl Stream> + '_ { + futures::stream::iter(self.blocks(height, direction)).yield_each(self.batch_size) } } diff --git a/crates/fuel-core/src/query/chain.rs b/crates/fuel-core/src/query/chain.rs deleted file mode 100644 index aebf442cf30..00000000000 --- a/crates/fuel-core/src/query/chain.rs +++ /dev/null @@ -1,13 +0,0 @@ -use crate::fuel_core_graphql_api::ports::OnChainDatabase; -use fuel_core_storage::Result as StorageResult; -use fuel_core_types::blockchain::primitives::DaBlockHeight; - -pub trait ChainQueryData: Send + Sync { - fn da_height(&self) -> StorageResult; -} - -impl ChainQueryData for D { - fn da_height(&self) -> StorageResult { - self.da_height() - } -} diff --git a/crates/fuel-core/src/query/coin.rs b/crates/fuel-core/src/query/coin.rs index 171a88168bd..c487bdba23c 100644 --- a/crates/fuel-core/src/query/coin.rs +++ b/crates/fuel-core/src/query/coin.rs @@ -1,15 +1,9 @@ -use crate::fuel_core_graphql_api::ports::{ - OffChainDatabase, - OnChainDatabase, -}; +use crate::fuel_core_graphql_api::database::ReadView; use fuel_core_storage::{ - iter::{ - BoxedIter, - IntoBoxedIter, - IterDirection, - }, + iter::IterDirection, not_found, tables::Coins, + Error as StorageError, Result as StorageResult, StorageAsRef, }; @@ -18,28 +12,17 @@ use fuel_core_types::{ fuel_tx::UtxoId, fuel_types::Address, }; +use futures::{ + Stream, + StreamExt, + TryStreamExt, +}; -pub trait CoinQueryData: Send + Sync { - fn coin(&self, utxo_id: UtxoId) -> StorageResult; - - fn owned_coins_ids( - &self, - owner: &Address, - start_coin: Option, - direction: IterDirection, - ) -> BoxedIter>; - - fn owned_coins( - &self, - owner: &Address, - start_coin: 
Option, - direction: IterDirection, - ) -> BoxedIter>; -} - -impl CoinQueryData for D { - fn coin(&self, utxo_id: UtxoId) -> StorageResult { +impl ReadView { + pub fn coin(&self, utxo_id: UtxoId) -> StorageResult { let coin = self + .on_chain + .as_ref() .storage::() .get(&utxo_id)? .ok_or(not_found!(Coins))? @@ -48,23 +31,36 @@ impl CoinQueryData for D { Ok(coin.uncompress(utxo_id)) } - fn owned_coins_ids( + pub async fn coins( &self, - owner: &Address, - start_coin: Option, - direction: IterDirection, - ) -> BoxedIter> { - self.owned_coins_ids(owner, start_coin, direction) + utxo_ids: Vec, + ) -> impl Iterator> + '_ { + // TODO: Use multiget when it's implemented. + // https://github.com/FuelLabs/fuel-core/issues/2344 + let coins = utxo_ids.into_iter().map(|id| self.coin(id)); + // Give a chance to other tasks to run. + tokio::task::yield_now().await; + coins } - fn owned_coins( + pub fn owned_coins( &self, owner: &Address, start_coin: Option, direction: IterDirection, - ) -> BoxedIter> { + ) -> impl Stream> + '_ { self.owned_coins_ids(owner, start_coin, direction) - .map(|res| res.and_then(|id| self.coin(id))) - .into_boxed() + .chunks(self.batch_size) + .map(|chunk| { + use itertools::Itertools; + + let chunk = chunk.into_iter().try_collect::<_, Vec<_>, _>()?; + Ok::<_, StorageError>(chunk) + }) + .try_filter_map(move |chunk| async move { + let chunk = self.coins(chunk).await; + Ok(Some(futures::stream::iter(chunk))) + }) + .try_flatten() } } diff --git a/crates/fuel-core/src/query/contract.rs b/crates/fuel-core/src/query/contract.rs index 3311fec5dd9..fa75e05a874 100644 --- a/crates/fuel-core/src/query/contract.rs +++ b/crates/fuel-core/src/query/contract.rs @@ -1,12 +1,5 @@ -use crate::fuel_core_graphql_api::ports::{ - OffChainDatabase, - OnChainDatabase, -}; +use crate::fuel_core_graphql_api::database::ReadView; use fuel_core_storage::{ - iter::{ - BoxedIter, - IterDirection, - }, not_found, tables::{ ContractsAssets, @@ -20,38 +13,21 @@ use fuel_core_types::{ AssetId, ContractId, }, - fuel_vm::Salt, services::graphql_api::ContractBalance, }; -pub trait ContractQueryData: Send + Sync { - fn contract_exists(&self, id: ContractId) -> StorageResult; - - fn contract_bytecode(&self, id: ContractId) -> StorageResult>; - - fn contract_salt(&self, id: ContractId) -> StorageResult; - - fn contract_balance( - &self, - contract_id: ContractId, - asset_id: AssetId, - ) -> StorageResult; - - fn contract_balances( - &self, - contract_id: ContractId, - start_asset: Option, - direction: IterDirection, - ) -> BoxedIter>; -} - -impl ContractQueryData for D { - fn contract_exists(&self, id: ContractId) -> StorageResult { - self.storage::().contains_key(&id) +impl ReadView { + pub fn contract_exists(&self, id: ContractId) -> StorageResult { + self.on_chain + .as_ref() + .storage::() + .contains_key(&id) } - fn contract_bytecode(&self, id: ContractId) -> StorageResult> { + pub fn contract_bytecode(&self, id: ContractId) -> StorageResult> { let contract = self + .on_chain + .as_ref() .storage::() .get(&id)? .ok_or(not_found!(ContractsRawCode))? @@ -60,16 +36,14 @@ impl ContractQueryData for D { Ok(contract.into()) } - fn contract_salt(&self, id: ContractId) -> StorageResult { - self.contract_salt(&id) - } - - fn contract_balance( + pub fn contract_balance( &self, contract_id: ContractId, asset_id: AssetId, ) -> StorageResult { let amount = self + .on_chain + .as_ref() .storage::() .get(&(&contract_id, &asset_id).into())? .ok_or(not_found!(ContractsAssets))? 
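The direct reads above all follow one shape: a storage `get` yields an optional copy-on-write value, a missing row becomes a `not_found!` error, and the `Cow` is cloned out. A toy stand-in for that shape (simplified types, not the `fuel-core-storage` API):

use std::borrow::Cow;
use std::collections::HashMap;

struct Table {
    rows: HashMap<u32, String>,
}

impl Table {
    // Storage returns borrowed data when it can; `Cow` defers the clone.
    fn get(&self, key: &u32) -> Result<Option<Cow<'_, String>>, String> {
        Ok(self.rows.get(key).map(Cow::Borrowed))
    }
}

fn read_owned(table: &Table, key: u32) -> Result<String, String> {
    table
        .get(&key)?
        .ok_or_else(|| format!("not found: {key}")) // the `not_found!` step
        .map(Cow::into_owned) // clone out of the borrowed view
}

fn main() {
    let mut rows = HashMap::new();
    rows.insert(7, "salt".to_string());
    let table = Table { rows };
    assert_eq!(read_owned(&table, 7).unwrap(), "salt");
    assert!(read_owned(&table, 8).is_err());
}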
@@ -81,13 +55,4 @@ impl ContractQueryData for D { asset_id, }) } - - fn contract_balances( - &self, - contract_id: ContractId, - start_asset: Option, - direction: IterDirection, - ) -> BoxedIter> { - self.contract_balances(contract_id, start_asset, direction) - } } diff --git a/crates/fuel-core/src/query/message.rs b/crates/fuel-core/src/query/message.rs index ece4a93fec7..c98b2358b2c 100644 --- a/crates/fuel-core/src/query/message.rs +++ b/crates/fuel-core/src/query/message.rs @@ -1,24 +1,10 @@ -use crate::{ - fuel_core_graphql_api::{ - ports::{ - DatabaseBlocks, - DatabaseMessageProof, - DatabaseMessages, - OffChainDatabase, - OnChainDatabase, - }, - IntoApiResult, - }, - query::{ - SimpleBlockData, - SimpleTransactionData, - TransactionQueryData, - }, +use crate::fuel_core_graphql_api::{ + database::ReadView, + IntoApiResult, }; use fuel_core_storage::{ iter::{ BoxedIter, - IntoBoxedIter, IterDirection, }, not_found, @@ -50,6 +36,11 @@ use fuel_core_types::{ }, services::txpool::TransactionStatus, }; +use futures::{ + Stream, + StreamExt, + TryStreamExt, +}; use itertools::Itertools; use std::borrow::Cow; @@ -80,63 +71,93 @@ pub trait MessageQueryData: Send + Sync { ) -> BoxedIter>; } -impl MessageQueryData for D { - fn message(&self, id: &Nonce) -> StorageResult { - self.storage::() +impl ReadView { + pub fn message(&self, id: &Nonce) -> StorageResult { + self.on_chain + .as_ref() + .storage::() .get(id)? .ok_or(not_found!(Messages)) .map(Cow::into_owned) } - fn owned_message_ids( + pub async fn messages( &self, - owner: &Address, - start_message_id: Option, - direction: IterDirection, - ) -> BoxedIter> { - self.owned_message_ids(owner, start_message_id, direction) + ids: Vec, + ) -> impl Iterator> + '_ { + // TODO: Use multiget when it's implemented. + // https://github.com/FuelLabs/fuel-core/issues/2344 + let messages = ids.into_iter().map(|id| self.message(&id)); + // Give a chance to other tasks to run. + tokio::task::yield_now().await; + messages } - fn owned_messages( - &self, - owner: &Address, + pub fn owned_messages<'a>( + &'a self, + owner: &'a Address, start_message_id: Option, direction: IterDirection, - ) -> BoxedIter> { + ) -> impl Stream> + 'a { self.owned_message_ids(owner, start_message_id, direction) - .map(|result| result.and_then(|id| self.message(&id))) - .into_boxed() - } - - fn all_messages( - &self, - start_message_id: Option, - direction: IterDirection, - ) -> BoxedIter> { - self.all_messages(start_message_id, direction) + .chunks(self.batch_size) + .map(|chunk| { + let chunk = chunk.into_iter().try_collect::<_, Vec<_>, _>()?; + Ok(chunk) + }) + .try_filter_map(move |chunk| async move { + let chunk = self.messages(chunk).await; + Ok::<_, StorageError>(Some(futures::stream::iter(chunk))) + }) + .try_flatten() } } /// Trait that specifies all the data required by the output message query. -pub trait MessageProofData: - Send + Sync + SimpleBlockData + SimpleTransactionData + DatabaseMessageProof -{ +pub trait MessageProofData { + /// Get the block. + fn block(&self, id: &BlockHeight) -> StorageResult; + + /// Return all receipts in the given transaction. + fn receipts(&self, transaction_id: &TxId) -> StorageResult>; + /// Get the status of a transaction. fn transaction_status( &self, transaction_id: &TxId, ) -> StorageResult; + + /// Gets the [`MerkleProof`] for the message block at `message_block_height` height + /// relatively to the commit block where message block <= commit block. 
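+    /// The returned proof lets a client verify that the message block is part
+    /// of the block history committed to by the commit block.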
+ fn block_history_proof( + &self, + message_block_height: &BlockHeight, + commit_block_height: &BlockHeight, + ) -> StorageResult; } -impl MessageProofData for D -where - D: OnChainDatabase + DatabaseBlocks + OffChainDatabase + ?Sized, -{ +impl MessageProofData for ReadView { + fn block(&self, id: &BlockHeight) -> StorageResult { + self.block(id) + } + + fn receipts(&self, transaction_id: &TxId) -> StorageResult> { + self.receipts(transaction_id) + } + fn transaction_status( &self, transaction_id: &TxId, ) -> StorageResult { - self.status(transaction_id) + self.tx_status(transaction_id) + } + + fn block_history_proof( + &self, + message_block_height: &BlockHeight, + commit_block_height: &BlockHeight, + ) -> StorageResult { + self.block_history_proof(message_block_height, commit_block_height) } } @@ -282,13 +303,10 @@ fn message_receipts_proof( } } -pub fn message_status( - database: &T, +pub fn message_status( + database: &ReadView, message_nonce: Nonce, -) -> StorageResult -where - T: OffChainDatabase + DatabaseMessages + ?Sized, -{ +) -> StorageResult { if database.message_is_spent(&message_nonce)? { Ok(MessageStatus::spent()) } else if database.message_exists(&message_nonce)? { diff --git a/crates/fuel-core/src/query/message/test.rs b/crates/fuel-core/src/query/message/test.rs index 3078f3a7de9..43d6ecbef1a 100644 --- a/crates/fuel-core/src/query/message/test.rs +++ b/crates/fuel-core/src/query/message/test.rs @@ -10,8 +10,6 @@ use fuel_core_types::{ fuel_tx::{ AssetId, ContractId, - Script, - Transaction, }, fuel_types::BlockHeight, tai64::Tai64, @@ -57,24 +55,14 @@ fn receipt(i: Option) -> Receipt { mockall::mock! { pub ProofDataStorage {} - impl SimpleBlockData for ProofDataStorage { + impl MessageProofData for ProofDataStorage { fn block(&self, height: &BlockHeight) -> StorageResult; - } - - impl DatabaseMessageProof for ProofDataStorage { fn block_history_proof( &self, message_block_height: &BlockHeight, commit_block_height: &BlockHeight, ) -> StorageResult; - } - - impl SimpleTransactionData for ProofDataStorage { - fn transaction(&self, transaction_id: &TxId) -> StorageResult; fn receipts(&self, transaction_id: &TxId) -> StorageResult>; - } - - impl MessageProofData for ProofDataStorage { fn transaction_status(&self, transaction_id: &TxId) -> StorageResult; } } @@ -116,16 +104,6 @@ async fn can_build_message_proof() { } }); - data.expect_transaction().returning(move |txn_id| { - let tx = TXNS - .iter() - .find(|t| *t == txn_id) - .map(|_| Script::default().into()) - .ok_or(not_found!("Transaction in `TXNS`"))?; - - Ok(tx) - }); - let commit_block_header = PartialBlockHeader { application: ApplicationHeader { da_height: 0u64.into(), diff --git a/crates/fuel-core/src/query/tx.rs b/crates/fuel-core/src/query/tx.rs index 1d2f1531363..0bceeef8809 100644 --- a/crates/fuel-core/src/query/tx.rs +++ b/crates/fuel-core/src/query/tx.rs @@ -1,17 +1,9 @@ -use crate::fuel_core_graphql_api::ports::{ - DatabaseBlocks, - OffChainDatabase, - OnChainDatabase, -}; - +use crate::fuel_core_graphql_api::database::ReadView; use fuel_core_storage::{ - iter::{ - BoxedIter, - IntoBoxedIter, - IterDirection, - }, + iter::IterDirection, not_found, tables::Transactions, + Error as StorageError, Result as StorageResult, }; use fuel_core_types::{ @@ -24,25 +16,15 @@ use fuel_core_types::{ fuel_types::Address, services::txpool::TransactionStatus, }; +use futures::{ + Stream, + StreamExt, + TryStreamExt, +}; -pub trait SimpleTransactionData: Send + Sync { - /// Return all receipts in the given transaction. 
- fn receipts(&self, transaction_id: &TxId) -> StorageResult>; - - /// Get the transaction. - fn transaction(&self, transaction_id: &TxId) -> StorageResult; -} - -impl SimpleTransactionData for D -where - D: OnChainDatabase + DatabaseBlocks + OffChainDatabase + ?Sized, -{ - fn transaction(&self, tx_id: &TxId) -> StorageResult { - self.transaction(tx_id) - } - - fn receipts(&self, tx_id: &TxId) -> StorageResult> { - let status = self.status(tx_id)?; +impl ReadView { + pub fn receipts(&self, tx_id: &TxId) -> StorageResult> { + let status = self.tx_status(tx_id)?; let receipts = match status { TransactionStatus::Success { receipts, .. } @@ -51,41 +33,30 @@ where }; receipts.ok_or(not_found!(Transactions)) } -} - -pub trait TransactionQueryData: Send + Sync + SimpleTransactionData { - fn status(&self, tx_id: &TxId) -> StorageResult; - - fn owned_transactions( - &self, - owner: Address, - start: Option, - direction: IterDirection, - ) -> BoxedIter>; -} - -impl TransactionQueryData for D -where - D: OnChainDatabase + DatabaseBlocks + OffChainDatabase + ?Sized, -{ - fn status(&self, tx_id: &TxId) -> StorageResult { - self.tx_status(tx_id) - } - fn owned_transactions( + pub fn owned_transactions( &self, owner: Address, start: Option, direction: IterDirection, - ) -> BoxedIter> { + ) -> impl Stream> + '_ { self.owned_transactions_ids(owner, start, direction) - .map(|result| { - result.and_then(|(tx_pointer, tx_id)| { - let tx = self.transaction(&tx_id)?; + .chunks(self.batch_size) + .map(|chunk| { + use itertools::Itertools; - Ok((tx_pointer, tx)) - }) + let chunk = chunk.into_iter().try_collect::<_, Vec<_>, _>()?; + Ok::<_, StorageError>(chunk) + }) + .try_filter_map(move |chunk| async move { + let tx_ids = chunk.iter().map(|(_, tx_id)| *tx_id).collect::>(); + let txs = self.transactions(tx_ids).await; + let txs = txs + .into_iter() + .zip(chunk) + .map(|(result, (tx_pointer, _))| result.map(|tx| (tx_pointer, tx))); + Ok(Some(futures::stream::iter(txs))) }) - .into_boxed() + .try_flatten() } } diff --git a/crates/fuel-core/src/query/upgrades.rs b/crates/fuel-core/src/query/upgrades.rs index 4a53edbe12a..850b7e6973f 100644 --- a/crates/fuel-core/src/query/upgrades.rs +++ b/crates/fuel-core/src/query/upgrades.rs @@ -1,3 +1,4 @@ +use crate::fuel_core_graphql_api::database::ReadView; use fuel_core_storage::{ not_found, tables::{ @@ -13,27 +14,14 @@ use fuel_core_types::{ fuel_vm::UploadedBytecode, }; -use crate::graphql_api::ports::OnChainDatabase; - -pub trait UpgradeQueryData: Send + Sync { - fn state_transition_bytecode_root( - &self, - version: StateTransitionBytecodeVersion, - ) -> StorageResult; - - fn state_transition_bytecode(&self, root: Bytes32) - -> StorageResult; -} - -impl UpgradeQueryData for D -where - D: OnChainDatabase + ?Sized, -{ - fn state_transition_bytecode_root( +impl ReadView { + pub fn state_transition_bytecode_root( &self, version: StateTransitionBytecodeVersion, ) -> StorageResult { let merkle_root = self + .on_chain + .as_ref() .storage::() .get(&version)? .ok_or(not_found!(StateTransitionBytecodeVersions))? @@ -42,11 +30,13 @@ where Ok(merkle_root) } - fn state_transition_bytecode( + pub fn state_transition_bytecode( &self, root: Bytes32, ) -> StorageResult { let bytecode = self + .on_chain + .as_ref() .storage::() .get(&root)? .ok_or(not_found!(UploadedBytecodes))? 
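The `schema.rs` change below is the consumer side of these stream conversions: the pagination helper now takes `Entries: Stream` instead of `Entries: Iterator` and awaits `try_collect`. A simplified before/after sketch of that migration (toy signatures; the real helper also computes page info and cursors):

use futures::{stream, Stream, TryStreamExt};

// Before: pagination collected a synchronous iterator of results.
fn collect_sync<I>(entries: I) -> Result<Vec<u32>, String>
where
    I: Iterator<Item = Result<u32, String>>,
{
    entries.collect()
}

// After: entries arrive as an async stream, so collection is awaited and the
// producer can yield between batches instead of blocking the executor.
async fn collect_async<S>(entries: S) -> Result<Vec<u32>, String>
where
    S: Stream<Item = Result<u32, String>>,
{
    entries.try_collect().await
}

#[tokio::main]
async fn main() {
    let eager = collect_sync((0u32..3).map(Ok)).unwrap();
    let lazy = collect_async(stream::iter((0u32..3).map(Ok))).await.unwrap();
    assert_eq!(eager, lazy);
}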
diff --git a/crates/fuel-core/src/schema.rs b/crates/fuel-core/src/schema.rs index bd9e550d448..bcbc5b5c970 100644 --- a/crates/fuel-core/src/schema.rs +++ b/crates/fuel-core/src/schema.rs @@ -23,8 +23,12 @@ use fuel_core_storage::{ iter::IterDirection, Result as StorageResult, }; -use itertools::Itertools; +use futures::{ + Stream, + TryStreamExt, +}; use std::borrow::Cow; +use tokio_stream::StreamExt; pub mod balance; pub mod blob; @@ -99,7 +103,7 @@ where // It means also returning `has_previous_page` and `has_next_page` values. // entries(start_key: Option) F: FnOnce(&Option, IterDirection) -> StorageResult, - Entries: Iterator>, + Entries: Stream>, SchemaKey: Eq, { match (after.as_ref(), before.as_ref(), first, last) { @@ -192,7 +196,7 @@ where } }); - let entries: Vec<_> = entries.try_collect()?; + let entries: Vec<_> = entries.try_collect().await?; let entries = entries.into_iter(); let mut connection = Connection::new(has_previous_page, has_next_page); diff --git a/crates/fuel-core/src/schema/balance.rs b/crates/fuel-core/src/schema/balance.rs index 6f83a831449..140bb81256f 100644 --- a/crates/fuel-core/src/schema/balance.rs +++ b/crates/fuel-core/src/schema/balance.rs @@ -1,9 +1,8 @@ use crate::{ fuel_core_graphql_api::{ api_service::ConsensusProvider, - QUERY_COSTS, + query_costs, }, - query::BalanceQueryData, schema::{ scalars::{ Address, @@ -24,6 +23,7 @@ use async_graphql::{ Object, }; use fuel_core_types::services::graphql_api; +use futures::StreamExt; pub struct Balance(graphql_api::AddressBalance); @@ -53,7 +53,7 @@ pub struct BalanceQuery; #[Object] impl BalanceQuery { - #[graphql(complexity = "QUERY_COSTS.balance_query")] + #[graphql(complexity = "query_costs().balance_query")] async fn balance( &self, ctx: &Context<'_>, @@ -65,13 +65,16 @@ impl BalanceQuery { .data_unchecked::() .latest_consensus_params() .base_asset_id(); - let balance = query.balance(owner.0, asset_id.0, base_asset_id)?.into(); + let balance = query + .balance(owner.0, asset_id.0, base_asset_id) + .await? + .into(); Ok(balance) } // TODO: This API should be migrated to the indexer for better support and // discontinued within fuel-core. 
- #[graphql(complexity = "QUERY_COSTS.balance_query")] + #[graphql(complexity = "query_costs().balance_query")] async fn balances( &self, ctx: &Context<'_>, @@ -86,14 +89,14 @@ impl BalanceQuery { return Err(anyhow!("pagination is not yet supported").into()) } let query = ctx.read_view()?; + let base_asset_id = *ctx + .data_unchecked::() + .latest_consensus_params() + .base_asset_id(); + let owner = filter.owner.into(); crate::schema::query_pagination(after, before, first, last, |_, direction| { - let owner = filter.owner.into(); - let base_asset_id = *ctx - .data_unchecked::() - .latest_consensus_params() - .base_asset_id(); Ok(query - .balances(owner, direction, base_asset_id) + .balances(&owner, direction, &base_asset_id) .map(|result| { result.map(|balance| (balance.asset_id.into(), balance.into())) })) diff --git a/crates/fuel-core/src/schema/blob.rs b/crates/fuel-core/src/schema/blob.rs index 938c2a6a1f3..76cc32530cc 100644 --- a/crates/fuel-core/src/schema/blob.rs +++ b/crates/fuel-core/src/schema/blob.rs @@ -1,7 +1,6 @@ use crate::{ - fuel_core_graphql_api::QUERY_COSTS, + fuel_core_graphql_api::query_costs, graphql_api::IntoApiResult, - query::BlobQueryData, schema::{ scalars::{ BlobId, @@ -28,7 +27,7 @@ impl Blob { self.0.into() } - #[graphql(complexity = "QUERY_COSTS.bytecode_read")] + #[graphql(complexity = "query_costs().bytecode_read")] async fn bytecode(&self, ctx: &Context<'_>) -> async_graphql::Result { let query = ctx.read_view()?; query @@ -49,7 +48,7 @@ pub struct BlobQuery; #[Object] impl BlobQuery { - #[graphql(complexity = "QUERY_COSTS.storage_read + child_complexity")] + #[graphql(complexity = "query_costs().storage_read + child_complexity")] async fn blob( &self, ctx: &Context<'_>, diff --git a/crates/fuel-core/src/schema/block.rs b/crates/fuel-core/src/schema/block.rs index 81a9bf3c0af..7cdfeff2d5b 100644 --- a/crates/fuel-core/src/schema/block.rs +++ b/crates/fuel-core/src/schema/block.rs @@ -7,15 +7,9 @@ use crate::{ fuel_core_graphql_api::{ api_service::ConsensusModule, database::ReadView, - ports::OffChainDatabase, + query_costs, Config as GraphQLConfig, IntoApiResult, - QUERY_COSTS, - }, - query::{ - BlockQueryData, - SimpleBlockData, - SimpleTransactionData, }, schema::{ scalars::{ @@ -42,11 +36,7 @@ use async_graphql::{ Union, }; use fuel_core_storage::{ - iter::{ - BoxedIter, - IntoBoxedIter, - IterDirection, - }, + iter::IterDirection, Result as StorageResult, }; use fuel_core_types::{ @@ -54,9 +44,15 @@ use fuel_core_types::{ block::CompressedBlock, header::BlockHeader, }, + fuel_tx::TxId, fuel_types, fuel_types::BlockHeight, }; +use futures::{ + Stream, + StreamExt, + TryStreamExt, +}; pub struct Block(pub(crate) CompressedBlock); @@ -118,14 +114,14 @@ impl Block { self.0.header().clone().into() } - #[graphql(complexity = "QUERY_COSTS.storage_read + child_complexity")] + #[graphql(complexity = "query_costs().storage_read + child_complexity")] async fn consensus(&self, ctx: &Context<'_>) -> async_graphql::Result { let query = ctx.read_view()?; let height = self.0.header().height(); Ok(query.consensus(height)?.try_into()?) } - #[graphql(complexity = "QUERY_COSTS.block_transactions_ids")] + #[graphql(complexity = "query_costs().block_transactions_ids")] async fn transaction_ids(&self) -> Vec { self.0 .transactions() @@ -135,20 +131,33 @@ impl Block { } // Assume that in average we have 32 transactions per block. 
- #[graphql(complexity = "QUERY_COSTS.block_transactions + child_complexity")] + #[graphql(complexity = "query_costs().block_transactions + child_complexity")] async fn transactions( &self, ctx: &Context<'_>, ) -> async_graphql::Result> { let query = ctx.read_view()?; - self.0 - .transactions() - .iter() - .map(|tx_id| { - let tx = query.transaction(tx_id)?; - Ok(Transaction::from_tx(*tx_id, tx)) + let tx_ids = futures::stream::iter(self.0.transactions().iter().copied()); + + let result = tx_ids + .chunks(query.batch_size) + .filter_map(move |tx_ids: Vec| { + let async_query = query.as_ref().clone(); + async move { + let txs = async_query.transactions(tx_ids.clone()).await; + let txs = txs + .into_iter() + .zip(tx_ids.into_iter()) + .map(|(r, tx_id)| r.map(|tx| Transaction::from_tx(tx_id, tx))); + + Some(futures::stream::iter(txs)) + } }) - .collect() + .flatten() + .try_collect() + .await?; + + Ok(result) } } @@ -246,7 +255,7 @@ pub struct BlockQuery; #[Object] impl BlockQuery { - #[graphql(complexity = "QUERY_COSTS.block_header + child_complexity")] + #[graphql(complexity = "query_costs().block_header + child_complexity")] async fn block( &self, ctx: &Context<'_>, @@ -276,7 +285,7 @@ impl BlockQuery { } #[graphql(complexity = "{\ - (QUERY_COSTS.block_header + child_complexity) \ + (query_costs().block_header + child_complexity) \ * (first.unwrap_or_default() as usize + last.unwrap_or_default() as usize) \ }")] async fn blocks( @@ -304,7 +313,7 @@ pub struct HeaderQuery; #[Object] impl HeaderQuery { - #[graphql(complexity = "QUERY_COSTS.block_header + child_complexity")] + #[graphql(complexity = "query_costs().block_header + child_complexity")] async fn header( &self, ctx: &Context<'_>, @@ -318,7 +327,7 @@ impl HeaderQuery { } #[graphql(complexity = "{\ - (QUERY_COSTS.block_header + child_complexity) \ + (query_costs().block_header + child_complexity) \ * (first.unwrap_or_default() as usize + last.unwrap_or_default() as usize) \ }")] async fn headers( @@ -345,16 +354,14 @@ fn blocks_query( query: &ReadView, height: Option, direction: IterDirection, -) -> BoxedIter> +) -> impl Stream> + '_ where T: async_graphql::OutputType, T: From, { - let blocks = query.compressed_blocks(height, direction).map(|result| { + query.compressed_blocks(height, direction).map(|result| { result.map(|block| ((*block.header().height()).into(), block.into())) - }); - - blocks.into_boxed() + }) } #[derive(Default)] diff --git a/crates/fuel-core/src/schema/chain.rs b/crates/fuel-core/src/schema/chain.rs index 16ec77b1e46..9acdbb01940 100644 --- a/crates/fuel-core/src/schema/chain.rs +++ b/crates/fuel-core/src/schema/chain.rs @@ -1,13 +1,9 @@ use crate::{ fuel_core_graphql_api::{ api_service::ConsensusProvider, - QUERY_COSTS, + query_costs, }, graphql_api::Config, - query::{ - BlockQueryData, - ChainQueryData, - }, schema::{ block::Block, scalars::{ @@ -779,13 +775,13 @@ impl HeavyOperation { #[Object] impl ChainInfo { - #[graphql(complexity = "QUERY_COSTS.storage_read")] + #[graphql(complexity = "query_costs().storage_read")] async fn name(&self, ctx: &Context<'_>) -> async_graphql::Result { let config: &Config = ctx.data_unchecked(); Ok(config.chain_name.clone()) } - #[graphql(complexity = "QUERY_COSTS.storage_read + child_complexity")] + #[graphql(complexity = "query_costs().storage_read + child_complexity")] async fn latest_block(&self, ctx: &Context<'_>) -> async_graphql::Result { let query = ctx.read_view()?; @@ -793,7 +789,7 @@ impl ChainInfo { Ok(latest_block) } - #[graphql(complexity = 
"QUERY_COSTS.storage_read")] + #[graphql(complexity = "query_costs().storage_read")] async fn da_height(&self, ctx: &Context<'_>) -> U64 { let Ok(query) = ctx.read_view() else { return 0.into(); @@ -802,7 +798,7 @@ impl ChainInfo { query.da_height().unwrap_or_default().0.into() } - #[graphql(complexity = "QUERY_COSTS.storage_read + child_complexity")] + #[graphql(complexity = "query_costs().storage_read + child_complexity")] async fn consensus_parameters( &self, ctx: &Context<'_>, @@ -814,7 +810,7 @@ impl ChainInfo { Ok(ConsensusParameters(params)) } - #[graphql(complexity = "QUERY_COSTS.storage_read + child_complexity")] + #[graphql(complexity = "query_costs().storage_read + child_complexity")] async fn gas_costs(&self, ctx: &Context<'_>) -> async_graphql::Result { let params = ctx .data_unchecked::() diff --git a/crates/fuel-core/src/schema/coins.rs b/crates/fuel-core/src/schema/coins.rs index 0d0abc97da2..ab9b0ca8959 100644 --- a/crates/fuel-core/src/schema/coins.rs +++ b/crates/fuel-core/src/schema/coins.rs @@ -4,14 +4,11 @@ use crate::{ SpendQuery, }, fuel_core_graphql_api::{ + query_costs, IntoApiResult, - QUERY_COSTS, }, graphql_api::api_service::ConsensusProvider, - query::{ - asset_query::AssetSpendTarget, - CoinQueryData, - }, + query::asset_query::AssetSpendTarget, schema::{ scalars::{ Address, @@ -43,6 +40,7 @@ use fuel_core_types::{ fuel_tx, }; use itertools::Itertools; +use tokio_stream::StreamExt; pub struct Coin(pub(crate) CoinModel); @@ -95,7 +93,7 @@ impl MessageCoin { self.0.amount.into() } - #[graphql(complexity = "QUERY_COSTS.storage_read")] + #[graphql(complexity = "query_costs().storage_read")] async fn asset_id(&self, ctx: &Context<'_>) -> AssetId { let params = ctx .data_unchecked::() @@ -151,7 +149,7 @@ pub struct CoinQuery; #[async_graphql::Object] impl CoinQuery { /// Gets the coin by `utxo_id`. - #[graphql(complexity = "QUERY_COSTS.storage_read + child_complexity")] + #[graphql(complexity = "query_costs().storage_read + child_complexity")] async fn coin( &self, ctx: &Context<'_>, @@ -163,9 +161,9 @@ impl CoinQuery { /// Gets all unspent coins of some `owner` maybe filtered with by `asset_id` per page. #[graphql(complexity = "{\ - QUERY_COSTS.storage_iterator\ - + (QUERY_COSTS.storage_read + first.unwrap_or_default() as usize) * child_complexity \ - + (QUERY_COSTS.storage_read + last.unwrap_or_default() as usize) * child_complexity\ + query_costs().storage_iterator\ + + (query_costs().storage_read + first.unwrap_or_default() as usize) * child_complexity \ + + (query_costs().storage_read + last.unwrap_or_default() as usize) * child_complexity\ }")] async fn coins( &self, @@ -177,8 +175,8 @@ impl CoinQuery { before: Option, ) -> async_graphql::Result> { let query = ctx.read_view()?; + let owner: fuel_tx::Address = filter.owner.into(); crate::schema::query_pagination(after, before, first, last, |start, direction| { - let owner: fuel_tx::Address = filter.owner.into(); let coins = query .owned_coins(&owner, (*start).map(Into::into), direction) .filter_map(|result| { @@ -208,7 +206,7 @@ impl CoinQuery { /// The list of spendable coins per asset from the query. The length of the result is /// the same as the length of `query_per_asset`. The ordering of assets and `query_per_asset` /// is the same. 
- #[graphql(complexity = "QUERY_COSTS.coins_to_spend")] + #[graphql(complexity = "query_costs().coins_to_spend")] async fn coins_to_spend( &self, ctx: &Context<'_>, @@ -216,8 +214,8 @@ impl CoinQuery { #[graphql(desc = "\ The list of requested assets` coins with asset ids, `target` amount the user wants \ to reach, and the `max` number of coins in the selection. Several entries with the \ - same asset id are not allowed.")] - query_per_asset: Vec, + same asset id are not allowed. The result can't contain more coins than `max_inputs`.")] + mut query_per_asset: Vec, #[graphql(desc = "The excluded coins from the selection.")] excluded_ids: Option< ExcludeInput, >, @@ -225,6 +223,15 @@ impl CoinQuery { let params = ctx .data_unchecked::() .latest_consensus_params(); + let max_input = params.tx_params().max_inputs(); + + // `coins_to_spend` exists to help select inputs for the transactions. + // It doesn't make sense to allow the user to request more than the maximum number + // of inputs. + // TODO: To avoid breaking changes, we will truncate request for now. + // In the future, we should return an error if the input is too large. + // https://github.com/FuelLabs/fuel-core/issues/2343 + query_per_asset.truncate(max_input as usize); let owner: fuel_tx::Address = owner.0; let query_per_asset = query_per_asset @@ -233,7 +240,10 @@ impl CoinQuery { AssetSpendTarget::new( e.asset_id.0, e.amount.0, - e.max.map(|max| max.0 as usize).unwrap_or(usize::MAX), + e.max + .and_then(|max| u16::try_from(max.0).ok()) + .unwrap_or(max_input) + .min(max_input), ) }) .collect_vec(); @@ -255,7 +265,8 @@ impl CoinQuery { let query = ctx.read_view()?; - let coins = random_improve(query.as_ref(), &spend_query)? + let coins = random_improve(query.as_ref(), &spend_query) + .await? 
.into_iter() .map(|coins| { coins diff --git a/crates/fuel-core/src/schema/contract.rs b/crates/fuel-core/src/schema/contract.rs index 2abc0e53a06..88e43c98a3d 100644 --- a/crates/fuel-core/src/schema/contract.rs +++ b/crates/fuel-core/src/schema/contract.rs @@ -1,9 +1,8 @@ use crate::{ fuel_core_graphql_api::{ + query_costs, IntoApiResult, - QUERY_COSTS, }, - query::ContractQueryData, schema::{ scalars::{ AssetId, @@ -32,6 +31,7 @@ use fuel_core_types::{ fuel_types, services::graphql_api, }; +use futures::StreamExt; pub struct Contract(pub(crate) fuel_types::ContractId); @@ -47,7 +47,7 @@ impl Contract { self.0.into() } - #[graphql(complexity = "QUERY_COSTS.bytecode_read")] + #[graphql(complexity = "query_costs().bytecode_read")] async fn bytecode(&self, ctx: &Context<'_>) -> async_graphql::Result { let query = ctx.read_view()?; query @@ -56,11 +56,11 @@ impl Contract { .map_err(Into::into) } - #[graphql(complexity = "QUERY_COSTS.storage_read")] + #[graphql(complexity = "query_costs().storage_read")] async fn salt(&self, ctx: &Context<'_>) -> async_graphql::Result { let query = ctx.read_view()?; query - .contract_salt(self.0) + .contract_salt(&self.0) .map(Into::into) .map_err(Into::into) } @@ -71,7 +71,7 @@ pub struct ContractQuery; #[Object] impl ContractQuery { - #[graphql(complexity = "QUERY_COSTS.storage_read + child_complexity")] + #[graphql(complexity = "query_costs().storage_read + child_complexity")] async fn contract( &self, ctx: &Context<'_>, @@ -119,7 +119,7 @@ pub struct ContractBalanceQuery; #[Object] impl ContractBalanceQuery { - #[graphql(complexity = "QUERY_COSTS.storage_read")] + #[graphql(complexity = "query_costs().storage_read")] async fn contract_balance( &self, ctx: &Context<'_>, @@ -145,9 +145,9 @@ impl ContractBalanceQuery { } #[graphql(complexity = "{\ - QUERY_COSTS.storage_iterator\ - + (QUERY_COSTS.storage_read + first.unwrap_or_default() as usize) * child_complexity \ - + (QUERY_COSTS.storage_read + last.unwrap_or_default() as usize) * child_complexity\ + query_costs().storage_iterator\ + + (query_costs().storage_read + first.unwrap_or_default() as usize) * child_complexity \ + + (query_costs().storage_read + last.unwrap_or_default() as usize) * child_complexity\ }")] async fn contract_balances( &self, @@ -169,7 +169,7 @@ impl ContractBalanceQuery { (*start).map(Into::into), direction, ) - .map(move |balance| { + .map(|balance| { let balance = balance?; let asset_id = balance.asset_id; diff --git a/crates/fuel-core/src/schema/da_compressed.rs b/crates/fuel-core/src/schema/da_compressed.rs index 3af336f8ba9..2f2939be83f 100644 --- a/crates/fuel-core/src/schema/da_compressed.rs +++ b/crates/fuel-core/src/schema/da_compressed.rs @@ -4,10 +4,9 @@ use super::{ }; use crate::{ fuel_core_graphql_api::{ + query_costs, IntoApiResult, - QUERY_COSTS, }, - query::da_compressed::DaCompressedBlockData, schema::scalars::U32, }; use async_graphql::{ @@ -37,7 +36,7 @@ pub struct DaCompressedBlockQuery; #[Object] impl DaCompressedBlockQuery { - #[graphql(complexity = "QUERY_COSTS.da_compressed_block_read")] + #[graphql(complexity = "query_costs().da_compressed_block_read")] async fn da_compressed_block( &self, ctx: &Context<'_>, diff --git a/crates/fuel-core/src/schema/gas_price.rs b/crates/fuel-core/src/schema/gas_price.rs index f1ded5fc106..7f7c7555e6d 100644 --- a/crates/fuel-core/src/schema/gas_price.rs +++ b/crates/fuel-core/src/schema/gas_price.rs @@ -5,11 +5,7 @@ use super::scalars::{ use crate::{ graphql_api::{ api_service::GasPriceProvider, - QUERY_COSTS, - }, - 
query::{ - BlockQueryData, - SimpleTransactionData, + query_costs, }, schema::ReadViewProvider, }; @@ -46,7 +42,7 @@ pub struct LatestGasPriceQuery {} #[Object] impl LatestGasPriceQuery { - #[graphql(complexity = "QUERY_COSTS.block_header")] + #[graphql(complexity = "query_costs().block_header")] async fn latest_gas_price( &self, ctx: &Context<'_>, @@ -84,7 +80,7 @@ pub struct EstimateGasPriceQuery {} #[Object] impl EstimateGasPriceQuery { - #[graphql(complexity = "2 * QUERY_COSTS.storage_read")] + #[graphql(complexity = "2 * query_costs().storage_read")] async fn estimate_gas_price( &self, ctx: &Context<'_>, diff --git a/crates/fuel-core/src/schema/message.rs b/crates/fuel-core/src/schema/message.rs index 8ba18784bf8..ff5da25ab8f 100644 --- a/crates/fuel-core/src/schema/message.rs +++ b/crates/fuel-core/src/schema/message.rs @@ -11,12 +11,8 @@ use super::{ ReadViewProvider, }; use crate::{ - fuel_core_graphql_api::{ - ports::OffChainDatabase, - QUERY_COSTS, - }, + fuel_core_graphql_api::query_costs, graphql_api::IntoApiResult, - query::MessageQueryData, schema::scalars::{ BlockId, U32, @@ -32,7 +28,9 @@ use async_graphql::{ Enum, Object, }; +use fuel_core_services::stream::IntoBoxStream; use fuel_core_types::entities; +use futures::StreamExt; pub struct Message(pub(crate) entities::relayer::message::Message); @@ -68,7 +66,7 @@ pub struct MessageQuery {} #[Object] impl MessageQuery { - #[graphql(complexity = "QUERY_COSTS.storage_read + child_complexity")] + #[graphql(complexity = "query_costs().storage_read + child_complexity")] async fn message( &self, ctx: &Context<'_>, @@ -80,9 +78,9 @@ impl MessageQuery { } #[graphql(complexity = "{\ - QUERY_COSTS.storage_iterator\ - + (QUERY_COSTS.storage_read + first.unwrap_or_default() as usize) * child_complexity \ - + (QUERY_COSTS.storage_read + last.unwrap_or_default() as usize) * child_complexity\ + query_costs().storage_iterator\ + + (query_costs().storage_read + first.unwrap_or_default() as usize) * child_complexity \ + + (query_costs().storage_read + last.unwrap_or_default() as usize) * child_complexity\ }")] async fn messages( &self, @@ -95,6 +93,8 @@ impl MessageQuery { ) -> async_graphql::Result> { let query = ctx.read_view()?; + let owner = owner.map(|owner| owner.0); + let owner_ref = owner.as_ref(); crate::schema::query_pagination( after, before, @@ -107,10 +107,12 @@ impl MessageQuery { None }; - let messages = if let Some(owner) = owner { - query.owned_messages(&owner.0, start, direction) + let messages = if let Some(owner) = owner_ref { + query + .owned_messages(owner, start, direction) + .into_boxed_ref() } else { - query.all_messages(start, direction) + query.all_messages(start, direction).into_boxed_ref() }; let messages = messages.map(|result| { @@ -126,7 +128,7 @@ impl MessageQuery { } // 256 * QUERY_COSTS.storage_read because the depth of the Merkle tree in the worst case is 256 - #[graphql(complexity = "256 * QUERY_COSTS.storage_read + child_complexity")] + #[graphql(complexity = "256 * query_costs().storage_read + child_complexity")] async fn message_proof( &self, ctx: &Context<'_>, @@ -157,7 +159,7 @@ impl MessageQuery { .map(MessageProof)) } - #[graphql(complexity = "QUERY_COSTS.storage_read + child_complexity")] + #[graphql(complexity = "query_costs().storage_read + child_complexity")] async fn message_status( &self, ctx: &Context<'_>, diff --git a/crates/fuel-core/src/schema/node_info.rs b/crates/fuel-core/src/schema/node_info.rs index f58e28a1d3b..5805c76dc60 100644 --- a/crates/fuel-core/src/schema/node_info.rs +++ 
b/crates/fuel-core/src/schema/node_info.rs @@ -3,8 +3,8 @@ use super::scalars::{ U64, }; use crate::fuel_core_graphql_api::{ + query_costs, Config as GraphQLConfig, - QUERY_COSTS, }; use async_graphql::{ Context, @@ -42,7 +42,7 @@ impl NodeInfo { self.node_version.to_owned() } - #[graphql(complexity = "QUERY_COSTS.get_peers + child_complexity")] + #[graphql(complexity = "query_costs().get_peers + child_complexity")] async fn peers(&self, _ctx: &Context<'_>) -> async_graphql::Result> { #[cfg(feature = "p2p")] { @@ -66,7 +66,7 @@ pub struct NodeQuery {} #[Object] impl NodeQuery { - #[graphql(complexity = "QUERY_COSTS.storage_read + child_complexity")] + #[graphql(complexity = "query_costs().storage_read + child_complexity")] async fn node_info(&self, ctx: &Context<'_>) -> async_graphql::Result { let config = ctx.data_unchecked::(); diff --git a/crates/fuel-core/src/schema/relayed_tx.rs b/crates/fuel-core/src/schema/relayed_tx.rs index 1bc915df1bf..6ae34b81c07 100644 --- a/crates/fuel-core/src/schema/relayed_tx.rs +++ b/crates/fuel-core/src/schema/relayed_tx.rs @@ -1,8 +1,5 @@ use crate::{ - fuel_core_graphql_api::{ - ports::DatabaseRelayedTransactions, - QUERY_COSTS, - }, + fuel_core_graphql_api::query_costs, schema::{ scalars::{ RelayedTransactionId, @@ -26,14 +23,14 @@ pub struct RelayedTransactionQuery {} #[Object] impl RelayedTransactionQuery { - #[graphql(complexity = "QUERY_COSTS.storage_read + child_complexity")] + #[graphql(complexity = "query_costs().storage_read + child_complexity")] async fn relayed_transaction_status( &self, ctx: &Context<'_>, #[graphql(desc = "The id of the relayed tx")] id: RelayedTransactionId, ) -> async_graphql::Result> { let query = ctx.read_view()?; - let status = query.transaction_status(id.0)?.map(|status| status.into()); + let status = query.relayed_tx_status(id.0)?.map(|status| status.into()); Ok(status) } } diff --git a/crates/fuel-core/src/schema/tx.rs b/crates/fuel-core/src/schema/tx.rs index d0a1474340a..8ee21edf972 100644 --- a/crates/fuel-core/src/schema/tx.rs +++ b/crates/fuel-core/src/schema/tx.rs @@ -6,9 +6,8 @@ use crate::{ ConsensusProvider, TxPool, }, - ports::OffChainDatabase, + query_costs, IntoApiResult, - QUERY_COSTS, }, graphql_api::{ database::ReadView, @@ -16,9 +15,6 @@ use crate::{ }, query::{ transaction_status_change, - BlockQueryData, - SimpleTransactionData, - TransactionQueryData, TxnStatusChangeState, }, schema::{ @@ -71,12 +67,10 @@ use futures::{ Stream, TryStreamExt, }; -use itertools::Itertools; use std::{ borrow::Cow, iter, }; -use tokio_stream::StreamExt; use types::{ DryRunTransactionExecutionStatus, Transaction, @@ -93,7 +87,7 @@ pub struct TxQuery; #[Object] impl TxQuery { - #[graphql(complexity = "QUERY_COSTS.storage_read + child_complexity")] + #[graphql(complexity = "query_costs().storage_read + child_complexity")] async fn transaction( &self, ctx: &Context<'_>, @@ -115,7 +109,7 @@ impl TxQuery { // We assume that each block has 100 transactions. 
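Every schema file in this diff swaps the `QUERY_COSTS` static for a `query_costs()` call, so the complexity weights can come from runtime configuration (the `graphql_config.costs` field added further down) instead of being fixed at compile time. A plausible shape for such an accessor is sketched here with a `OnceLock`; the `Costs` fields, the default numbers, and the `install_costs` helper are illustrative assumptions, not the actual fuel-core implementation.

use std::sync::OnceLock;

#[derive(Debug, Clone, PartialEq)]
pub struct Costs {
    pub storage_read: usize,
    pub block_header: usize,
}

impl Default for Costs {
    fn default() -> Self {
        // Placeholder numbers, not fuel-core's real defaults.
        Self { storage_read: 40, block_header: 160 }
    }
}

static COSTS: OnceLock<Costs> = OnceLock::new();

/// Install the costs parsed from the CLI. Rejects a second, different set,
/// which is the failure mode the dos.rs test at the end of this diff
/// ("cannot initialize queries with non-default costs in tests") exercises.
pub fn install_costs(costs: Costs) -> Result<(), &'static str> {
    let stored = COSTS.get_or_init(|| costs.clone());
    if *stored == costs {
        Ok(())
    } else {
        Err("query costs were already initialized with different values")
    }
}

/// Accessor usable inside `#[graphql(complexity = "...")]` expressions.
pub fn query_costs() -> &'static Costs {
    COSTS.get_or_init(Costs::default)
}

fn main() {
    install_costs(Costs::default()).unwrap();
    assert_eq!(query_costs().storage_read, 40);
}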
#[graphql(complexity = "{\ - (QUERY_COSTS.tx_get + child_complexity) \ + (query_costs().tx_get + child_complexity) \ * (first.unwrap_or_default() as usize + last.unwrap_or_default() as usize) }")] async fn transactions( @@ -128,7 +122,9 @@ impl TxQuery { ) -> async_graphql::Result< Connection, > { + use futures::stream::StreamExt; let query = ctx.read_view()?; + let query_ref = query.as_ref(); crate::schema::query_pagination( after, before, @@ -137,41 +133,58 @@ impl TxQuery { |start: &Option, direction| { let start = *start; let block_id = start.map(|sorted| sorted.block_height); - let all_block_ids = query.compressed_blocks(block_id, direction); + let compressed_blocks = query.compressed_blocks(block_id, direction); - let all_txs = all_block_ids - .map(move |block| { - block.map(|fuel_block| { - let (header, mut txs) = fuel_block.into_inner(); + let all_txs = compressed_blocks + .map_ok(move |fuel_block| { + let (header, mut txs) = fuel_block.into_inner(); - if direction == IterDirection::Reverse { - txs.reverse(); - } + if direction == IterDirection::Reverse { + txs.reverse(); + } - txs.into_iter().zip(iter::repeat(*header.height())) - }) + let iter = txs.into_iter().zip(iter::repeat(*header.height())); + futures::stream::iter(iter).map(Ok) }) - .flatten_ok() - .map(|result| { - result.map(|(tx_id, block_height)| { - SortedTxCursor::new(block_height, tx_id.into()) - }) + .try_flatten() + .map_ok(|(tx_id, block_height)| { + SortedTxCursor::new(block_height, tx_id.into()) }) - .skip_while(move |result| { - if let Ok(sorted) = result { - if let Some(start) = start { - return sorted != &start - } - } - false - }); - let all_txs = all_txs.map(|result: StorageResult| { - result.and_then(|sorted| { - let tx = query.transaction(&sorted.tx_id.0)?; + .try_skip_while(move |sorted| { + let skip = if let Some(start) = start { + sorted != &start + } else { + false + }; + + async move { Ok::<_, StorageError>(skip) } + }) + .chunks(query_ref.batch_size) + .map(|chunk| { + use itertools::Itertools; - Ok((sorted, Transaction::from_tx(sorted.tx_id.0, tx))) + let chunk = chunk.into_iter().try_collect::<_, Vec<_>, _>()?; + Ok::<_, StorageError>(chunk) + }) + .try_filter_map(move |chunk| { + let async_query = query_ref.clone(); + async move { + let tx_ids = chunk + .iter() + .map(|sorted| sorted.tx_id.0) + .collect::>(); + let txs = async_query.transactions(tx_ids).await; + let txs = txs.into_iter().zip(chunk.into_iter()).map( + |(result, sorted)| { + result.map(|tx| { + (sorted, Transaction::from_tx(sorted.tx_id.0, tx)) + }) + }, + ); + Ok(Some(futures::stream::iter(txs))) + } }) - }); + .try_flatten(); Ok(all_txs) }, @@ -180,9 +193,9 @@ impl TxQuery { } #[graphql(complexity = "{\ - QUERY_COSTS.storage_iterator\ - + (QUERY_COSTS.storage_read + first.unwrap_or_default() as usize) * child_complexity \ - + (QUERY_COSTS.storage_read + last.unwrap_or_default() as usize) * child_complexity\ + query_costs().storage_iterator\ + + (query_costs().storage_read + first.unwrap_or_default() as usize) * child_complexity \ + + (query_costs().storage_read + last.unwrap_or_default() as usize) * child_complexity\ }")] async fn transactions_by_owner( &self, @@ -194,6 +207,7 @@ impl TxQuery { before: Option, ) -> async_graphql::Result> { + use futures::stream::StreamExt; let query = ctx.read_view()?; let params = ctx .data_unchecked::() @@ -223,7 +237,7 @@ impl TxQuery { } /// Estimate the predicate gas for the provided transaction - #[graphql(complexity = "QUERY_COSTS.estimate_predicates + child_complexity")] + 
#[graphql(complexity = "query_costs().estimate_predicates + child_complexity")] async fn estimate_predicates( &self, ctx: &Context<'_>, @@ -268,7 +282,7 @@ pub struct TxMutation; impl TxMutation { /// Execute a dry-run of multiple transactions using a fork of current state, no changes are committed. #[graphql( - complexity = "QUERY_COSTS.dry_run * txs.len() + child_complexity * txs.len()" + complexity = "query_costs().dry_run * txs.len() + child_complexity * txs.len()" )] async fn dry_run( &self, @@ -320,7 +334,7 @@ impl TxMutation { /// Submits transaction to the `TxPool`. /// /// Returns submitted transaction if the transaction is included in the `TxPool` without problems. - #[graphql(complexity = "QUERY_COSTS.submit + child_complexity")] + #[graphql(complexity = "query_costs().submit + child_complexity")] async fn submit( &self, ctx: &Context<'_>, @@ -360,7 +374,7 @@ impl TxStatusSubscription { /// then the updates arrive. In such a case the stream will close without /// a status. If this occurs the stream can simply be restarted to return /// the latest status. - #[graphql(complexity = "QUERY_COSTS.status_change + child_complexity")] + #[graphql(complexity = "query_costs().status_change + child_complexity")] async fn status_change<'a>( &self, ctx: &'a Context<'a>, @@ -380,7 +394,7 @@ impl TxStatusSubscription { } /// Submits transaction to the `TxPool` and await either confirmation or failure. - #[graphql(complexity = "QUERY_COSTS.submit_and_await + child_complexity")] + #[graphql(complexity = "query_costs().submit_and_await + child_complexity")] async fn submit_and_await<'a>( &self, ctx: &'a Context<'a>, @@ -388,6 +402,7 @@ impl TxStatusSubscription { ) -> async_graphql::Result< impl Stream> + 'a, > { + use tokio_stream::StreamExt; let subscription = submit_and_await_status(ctx, tx).await?; Ok(subscription @@ -398,7 +413,7 @@ impl TxStatusSubscription { /// Submits the transaction to the `TxPool` and returns a stream of events. /// Compared to the `submitAndAwait`, the stream also contains ` /// SubmittedStatus` as an intermediate state. 
- #[graphql(complexity = "QUERY_COSTS.submit_and_await + child_complexity")] + #[graphql(complexity = "query_costs().submit_and_await + child_complexity")] async fn submit_and_await_status<'a>( &self, ctx: &'a Context<'a>, @@ -416,6 +431,7 @@ async fn submit_and_await_status<'a>( ) -> async_graphql::Result< impl Stream> + 'a, > { + use tokio_stream::StreamExt; let txpool = ctx.data_unchecked::(); let params = ctx .data_unchecked::() diff --git a/crates/fuel-core/src/schema/tx/types.rs b/crates/fuel-core/src/schema/tx/types.rs index 820271430e2..effbc463d0c 100644 --- a/crates/fuel-core/src/schema/tx/types.rs +++ b/crates/fuel-core/src/schema/tx/types.rs @@ -10,13 +10,8 @@ use crate::{ TxPool, }, database::ReadView, + query_costs, IntoApiResult, - QUERY_COSTS, - }, - query::{ - SimpleBlockData, - SimpleTransactionData, - TransactionQueryData, }, schema::{ block::Block, @@ -182,14 +177,14 @@ impl SuccessStatus { self.block_height.into() } - #[graphql(complexity = "QUERY_COSTS.block_header + child_complexity")] + #[graphql(complexity = "query_costs().block_header + child_complexity")] async fn block(&self, ctx: &Context<'_>) -> async_graphql::Result { let query = ctx.read_view()?; let block = query.block(&self.block_height)?; Ok(block.into()) } - #[graphql(complexity = "QUERY_COSTS.storage_read + child_complexity")] + #[graphql(complexity = "query_costs().storage_read + child_complexity")] async fn transaction(&self, ctx: &Context<'_>) -> async_graphql::Result { let query = ctx.read_view()?; let transaction = query.transaction(&self.tx_id)?; @@ -238,14 +233,14 @@ impl FailureStatus { self.block_height.into() } - #[graphql(complexity = "QUERY_COSTS.block_header + child_complexity")] + #[graphql(complexity = "query_costs().block_header + child_complexity")] async fn block(&self, ctx: &Context<'_>) -> async_graphql::Result { let query = ctx.read_view()?; let block = query.block(&self.block_height)?; Ok(block.into()) } - #[graphql(complexity = "QUERY_COSTS.storage_read + child_complexity")] + #[graphql(complexity = "query_costs().storage_read + child_complexity")] async fn transaction(&self, ctx: &Context<'_>) -> async_graphql::Result { let query = ctx.read_view()?; let transaction = query.transaction(&self.tx_id)?; @@ -417,7 +412,7 @@ impl Transaction { TransactionId(self.1) } - #[graphql(complexity = "QUERY_COSTS.storage_read")] + #[graphql(complexity = "query_costs().storage_read")] async fn input_asset_ids(&self, ctx: &Context<'_>) -> Option> { let params = ctx .data_unchecked::() @@ -693,7 +688,7 @@ impl Transaction { } } - #[graphql(complexity = "QUERY_COSTS.tx_status_read + child_complexity")] + #[graphql(complexity = "query_costs().tx_status_read + child_complexity")] async fn status( &self, ctx: &Context<'_>, @@ -848,7 +843,7 @@ impl Transaction { } } - #[graphql(complexity = "QUERY_COSTS.tx_raw_payload")] + #[graphql(complexity = "query_costs().tx_raw_payload")] /// Return the transaction bytes using canonical encoding async fn raw_payload(&self) -> HexString { HexString(self.0.clone().to_bytes()) @@ -992,7 +987,7 @@ pub(crate) async fn get_tx_status( txpool: &TxPool, ) -> Result, StorageError> { match query - .status(&id) + .tx_status(&id) .into_api_result::()? 
{ Some(status) => { diff --git a/crates/fuel-core/src/schema/upgrades.rs b/crates/fuel-core/src/schema/upgrades.rs index d9e62906a1e..a4591ccd155 100644 --- a/crates/fuel-core/src/schema/upgrades.rs +++ b/crates/fuel-core/src/schema/upgrades.rs @@ -1,10 +1,9 @@ use crate::{ graphql_api::{ api_service::ConsensusProvider, + query_costs, IntoApiResult, - QUERY_COSTS, }, - query::UpgradeQueryData, schema::{ chain::ConsensusParameters, scalars::HexString, @@ -30,7 +29,7 @@ pub struct UpgradeQuery; #[Object] impl UpgradeQuery { - #[graphql(complexity = "QUERY_COSTS.storage_read + child_complexity")] + #[graphql(complexity = "query_costs().storage_read + child_complexity")] async fn consensus_parameters( &self, ctx: &Context<'_>, @@ -43,7 +42,7 @@ impl UpgradeQuery { Ok(ConsensusParameters(params)) } - #[graphql(complexity = "QUERY_COSTS.storage_read + child_complexity")] + #[graphql(complexity = "query_costs().storage_read + child_complexity")] async fn state_transition_bytecode_by_version( &self, ctx: &Context<'_>, @@ -55,7 +54,7 @@ impl UpgradeQuery { .into_api_result() } - #[graphql(complexity = "QUERY_COSTS.storage_read + child_complexity")] + #[graphql(complexity = "query_costs().storage_read + child_complexity")] async fn state_transition_bytecode_by_root( &self, root: HexString, @@ -74,7 +73,7 @@ impl StateTransitionBytecode { HexString(self.root.to_vec()) } - #[graphql(complexity = "QUERY_COSTS.state_transition_bytecode_read")] + #[graphql(complexity = "query_costs().state_transition_bytecode_read")] async fn bytecode( &self, ctx: &Context<'_>, diff --git a/crates/fuel-core/src/service/adapters/block_importer.rs b/crates/fuel-core/src/service/adapters/block_importer.rs index 62006c7847e..5b8a06e7f1e 100644 --- a/crates/fuel-core/src/service/adapters/block_importer.rs +++ b/crates/fuel-core/src/service/adapters/block_importer.rs @@ -62,8 +62,11 @@ impl BlockImporterAdapter { executor: ExecutorAdapter, verifier: VerifierAdapter, ) -> Self { + let metrics = config.metrics; let importer = Importer::new(chain_id, config, database, executor, verifier); - importer.init_metrics(); + if metrics { + importer.init_metrics(); + } Self { block_importer: Arc::new(importer), } diff --git a/crates/fuel-core/src/service/config.rs b/crates/fuel-core/src/service/config.rs index e5473135d6d..2dd6602a979 100644 --- a/crates/fuel-core/src/service/config.rs +++ b/crates/fuel-core/src/service/config.rs @@ -104,7 +104,7 @@ impl Config { #[cfg(feature = "test-helpers")] pub fn local_node_with_reader(snapshot_reader: SnapshotReader) -> Self { - let block_importer = fuel_core_importer::Config::new(); + let block_importer = fuel_core_importer::Config::new(false); let latest_block = snapshot_reader.last_block_config(); // In tests, we always want to use the native executor as a default configuration. 
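The adapter change above only calls `init_metrics()` when metrics are enabled, so a disabled module never touches the global Prometheus registry. Enablement itself is decided per module by the new `fuel_core_metrics::config` module further down in this diff: a module is on unless it is listed explicitly or `all` is disabled. A compact sketch of that predicate, with a trimmed-down `Module` enum:

#[derive(Debug, Clone, Copy, PartialEq)]
enum Module {
    All,
    Importer,
    P2P,
    GraphQL,
}

/// A module is enabled unless it is disabled explicitly or `all` is disabled.
fn is_enabled(disabled: &[Module], module: Module) -> bool {
    !disabled.contains(&module) && !disabled.contains(&Module::All)
}

fn main() {
    // `--disable-metrics importer` turns off one module...
    assert!(!is_enabled(&[Module::Importer], Module::Importer));
    assert!(is_enabled(&[Module::Importer], Module::P2P));
    // ...while `--disable-metrics all` turns off everything.
    assert!(!is_enabled(&[Module::All], Module::GraphQL));
}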
let native_executor_version = latest_block @@ -138,6 +138,8 @@ impl Config { std::net::Ipv4Addr::new(127, 0, 0, 1).into(), 0, ), + number_of_threads: 0, + database_batch_size: 100, max_queries_depth: 16, max_queries_complexity: 80000, max_queries_recursive_depth: 16, @@ -147,6 +149,7 @@ impl Config { request_body_bytes_limit: 16 * 1024 * 1024, query_log_threshold_time: Duration::from_secs(2), api_request_timeout: Duration::from_secs(60), + costs: Default::default(), }, combined_db_config, continue_on_error: false, diff --git a/crates/fuel-core/src/state/rocks_db_key_iterator.rs b/crates/fuel-core/src/state/rocks_db_key_iterator.rs index b77e2cb7179..432ab6b41ae 100644 --- a/crates/fuel-core/src/state/rocks_db_key_iterator.rs +++ b/crates/fuel-core/src/state/rocks_db_key_iterator.rs @@ -18,9 +18,9 @@ pub struct RocksDBKeyIterator<'a, D: DBAccess, R> { _marker: core::marker::PhantomData, } -pub trait ExtractItem: 'static { +pub trait ExtractItem: Send + Sync + 'static { /// The item type returned by the iterator. - type Item; + type Item: Send + Sync; /// Extracts the item from the raw iterator. fn extract_item( diff --git a/crates/metrics/Cargo.toml b/crates/metrics/Cargo.toml index 7851071b268..447c48c4ef9 100644 --- a/crates/metrics/Cargo.toml +++ b/crates/metrics/Cargo.toml @@ -11,10 +11,13 @@ repository = { workspace = true } description = "Fuel metrics" [dependencies] +once_cell = { workspace = true } parking_lot = { workspace = true } pin-project-lite = { workspace = true } prometheus-client = { workspace = true } regex = "1" +strum = { workspace = true } +strum_macros = { workspace = true } tracing = { workspace = true } [dev-dependencies] diff --git a/crates/metrics/src/buckets.rs b/crates/metrics/src/buckets.rs new file mode 100644 index 00000000000..0b51e1b70c0 --- /dev/null +++ b/crates/metrics/src/buckets.rs @@ -0,0 +1,65 @@ +use std::{ + collections::HashMap, + sync::OnceLock, +}; +#[cfg(test)] +use strum_macros::EnumIter; + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +#[cfg_attr(test, derive(EnumIter))] +pub(crate) enum Buckets { + Timing, +} +static BUCKETS: OnceLock>> = OnceLock::new(); +pub(crate) fn buckets(b: Buckets) -> impl Iterator { + BUCKETS.get_or_init(initialize_buckets)[&b].iter().copied() +} + +#[rustfmt::skip] +fn initialize_buckets() -> HashMap> { + [ + ( + Buckets::Timing, + vec![ + 0.005, + 0.010, + 0.025, + 0.050, + 0.100, + 0.250, + 0.500, + 1.000, + 2.500, + 5.000, + 10.000, + ], + ), + ] + .into_iter() + .collect() +} + +#[cfg(test)] +mod tests { + use strum::IntoEnumIterator; + + use crate::buckets::Buckets; + + use super::initialize_buckets; + + #[test] + fn buckets_are_defined_for_every_variant() { + let actual_buckets = initialize_buckets(); + let actual_buckets = actual_buckets.keys().collect::>(); + + let required_buckets: Vec<_> = Buckets::iter().collect(); + + assert_eq!(required_buckets.len(), actual_buckets.len()); + + let all_buckets_defined = required_buckets + .iter() + .all(|required_bucket| actual_buckets.contains(&required_bucket)); + + assert!(all_buckets_defined) + } +} diff --git a/crates/metrics/src/config.rs b/crates/metrics/src/config.rs new file mode 100644 index 00000000000..77c7fd297ff --- /dev/null +++ b/crates/metrics/src/config.rs @@ -0,0 +1,51 @@ +use once_cell::sync::Lazy; +use strum::IntoEnumIterator; +use strum_macros::{ + Display, + EnumIter, + EnumString, +}; + +#[derive(Debug, Display, Clone, Copy, PartialEq, EnumString, EnumIter)] +#[strum(serialize_all = "lowercase")] +pub enum Module { + All, + Importer, + 
P2P, + Producer, + TxPool, /* TODO[RC]: Not used. Add support in https://github.com/FuelLabs/fuel-core/pull/2321 */ + GraphQL, // TODO[RC]: Not used... yet. +} + +/// Configuration for disabling metrics. +pub trait DisableConfig { + /// Returns `true` if the given module is enabled. + fn is_enabled(&self, module: Module) -> bool; + + /// Returns the list of enabled modules. + fn list_of_enabled(&self) -> Vec; +} + +impl DisableConfig for Vec { + fn is_enabled(&self, module: Module) -> bool { + !self.contains(&module) && !self.contains(&Module::All) + } + + fn list_of_enabled(&self) -> Vec { + Module::iter() + .filter(|module| self.is_enabled(*module) && *module != Module::All) + .collect() + } +} + +static HELP_STRING: Lazy = Lazy::new(|| { + let all_modules: Vec<_> = Module::iter().map(|module| module.to_string()).collect(); + format!( + "Comma-separated list of modules or 'all' to disable all metrics. Available options: {}, all", + all_modules.join(", ") + ) +}); + +pub fn help_string() -> &'static str { + &HELP_STRING +} diff --git a/crates/metrics/src/futures/future_tracker.rs b/crates/metrics/src/futures/future_tracker.rs index 26f00d1b59c..a5e73aa864a 100644 --- a/crates/metrics/src/futures/future_tracker.rs +++ b/crates/metrics/src/futures/future_tracker.rs @@ -150,7 +150,9 @@ impl Future for FutureTracker { #[cfg(test)] mod tests { - use super::*; + use std::time::Duration; + + use crate::futures::future_tracker::FutureTracker; #[tokio::test] async fn empty_future() { diff --git a/crates/metrics/src/graphql_metrics.rs b/crates/metrics/src/graphql_metrics.rs index 883ee31c2c4..383d649109a 100644 --- a/crates/metrics/src/graphql_metrics.rs +++ b/crates/metrics/src/graphql_metrics.rs @@ -1,6 +1,9 @@ use crate::{ + buckets::{ + buckets, + Buckets, + }, global_registry, - timing_buckets, }; use prometheus_client::{ encoding::EncodeLabelSet, @@ -22,16 +25,23 @@ pub struct GraphqlMetrics { // using gauges in case blocks are rolled back for any reason pub total_txs_count: Gauge, requests: Family, + queries_complexity: Histogram, } impl GraphqlMetrics { fn new() -> Self { let tx_count_gauge = Gauge::default(); + let queries_complexity = Histogram::new(buckets_complexity()); let requests = Family::::new_with_constructor(|| { - Histogram::new(timing_buckets().iter().cloned()) + Histogram::new(buckets(Buckets::Timing)) }); let mut registry = global_registry().registry.lock(); registry.register("graphql_request_duration_seconds", "", requests.clone()); + registry.register( + "graphql_query_complexity", + "The complexity of all queries received", + queries_complexity.clone(), + ); registry.register( "importer_tx_count", @@ -41,6 +51,7 @@ impl GraphqlMetrics { Self { total_txs_count: tx_count_gauge, + queries_complexity, requests, } } @@ -51,9 +62,30 @@ impl GraphqlMetrics { }); histogram.observe(time); } + + pub fn graphql_complexity_observe(&self, complexity: f64) { + self.queries_complexity.observe(complexity); + } } static GRAPHQL_METRICS: OnceLock = OnceLock::new(); pub fn graphql_metrics() -> &'static GraphqlMetrics { GRAPHQL_METRICS.get_or_init(GraphqlMetrics::new) } + +fn buckets_complexity() -> impl Iterator { + [ + 1_000.0, + 5_000.0, + 10_000.0, + 20_000.0, + 50_000.0, + 100_000.0, + 250_000.0, + 500_000.0, + 1_000_000.0, + 5_000_000.0, + 10_000_000.0, + ] + .into_iter() +} diff --git a/crates/metrics/src/importer.rs b/crates/metrics/src/importer.rs index 3f0bb4caec0..0cf460edc3a 100644 --- a/crates/metrics/src/importer.rs +++ b/crates/metrics/src/importer.rs @@ -1,6 +1,9 @@ use 
crate::{
+    buckets::{
+        buckets,
+        Buckets,
+    },
     global_registry,
-    timing_buckets,
 };
 use prometheus_client::metrics::{
     gauge::Gauge,
@@ -15,14 +18,21 @@ pub struct ImporterMetrics {
     pub block_height: Gauge,
     pub latest_block_import_timestamp: Gauge,
     pub execute_and_commit_duration: Histogram,
+    pub gas_per_block: Gauge,
+    pub fee_per_block: Gauge,
+    pub transactions_per_block: Gauge,
+    pub gas_price: Gauge,
 }

 impl Default for ImporterMetrics {
     fn default() -> Self {
         let block_height_gauge = Gauge::default();
         let latest_block_import_ms = Gauge::default();
-        let execute_and_commit_duration =
-            Histogram::new(timing_buckets().iter().cloned());
+        let execute_and_commit_duration = Histogram::new(buckets(Buckets::Timing));
+        let gas_per_block = Gauge::default();
+        let fee_per_block = Gauge::default();
+        let transactions_per_block = Gauge::default();
+        let gas_price = Gauge::default();

         let mut registry = global_registry().registry.lock();
         registry.register(
@@ -43,10 +53,38 @@ impl Default for ImporterMetrics {
             execute_and_commit_duration.clone(),
         );

+        registry.register(
+            "importer_gas_per_block",
+            "The total gas used in a block",
+            gas_per_block.clone(),
+        );
+
+        registry.register(
+            "importer_fee_per_block_gwei",
+            "The total fee (gwei) paid by transactions in a block",
+            fee_per_block.clone(),
+        );
+
+        registry.register(
+            "importer_transactions_per_block",
+            "The total number of transactions in a block",
+            transactions_per_block.clone(),
+        );
+
+        registry.register(
+            "importer_gas_price_for_block",
+            "The gas prices used in a block",
+            gas_price.clone(),
+        );
+
         Self {
             block_height: block_height_gauge,
             latest_block_import_timestamp: latest_block_import_ms,
             execute_and_commit_duration,
+            gas_per_block,
+            fee_per_block,
+            transactions_per_block,
+            gas_price,
         }
     }
 }
diff --git a/crates/metrics/src/lib.rs b/crates/metrics/src/lib.rs
index f7e4e22f7ca..399b34c51e5 100644
--- a/crates/metrics/src/lib.rs
+++ b/crates/metrics/src/lib.rs
@@ -19,6 +19,8 @@ pub struct GlobalRegistry {
     pub registry: parking_lot::Mutex<Registry>,
 }

+mod buckets;
+pub mod config;
 pub mod core_metrics;
 pub mod futures;
 pub mod graphql_metrics;
@@ -26,16 +28,6 @@ pub mod importer;
 pub mod p2p_metrics;
 pub mod txpool_metrics;

-// recommended bucket defaults for logging response times
-static BUCKETS: OnceLock<Vec<f64>> = OnceLock::new();
-pub fn timing_buckets() -> &'static Vec<f64> {
-    BUCKETS.get_or_init(|| {
-        vec![
-            0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0,
-        ]
-    })
-}
-
 static GLOBAL_REGISTER: OnceLock<GlobalRegistry> = OnceLock::new();

 pub fn global_registry() -> &'static GlobalRegistry {
diff --git a/crates/services/Cargo.toml b/crates/services/Cargo.toml
index 345bcd64287..34d0726cdca 100644
--- a/crates/services/Cargo.toml
+++ b/crates/services/Cargo.toml
@@ -15,6 +15,7 @@ async-trait = { workspace = true }
 fuel-core-metrics = { workspace = true }
 futures = { workspace = true }
 parking_lot = { workspace = true }
+pin-project-lite = { workspace = true }
 rayon = { workspace = true, optional = true }
 tokio = { workspace = true, features = ["full"] }
 tracing = { workspace = true }
diff --git a/crates/services/consensus_module/poa/src/service.rs b/crates/services/consensus_module/poa/src/service.rs
index 04255ac108a..ffb88f8db2a 100644
--- a/crates/services/consensus_module/poa/src/service.rs
+++ b/crates/services/consensus_module/poa/src/service.rs
@@ -573,10 +573,6 @@ where
                     should_continue = false;
                 }
             }
-            _ = self.new_txs_watcher.changed() => {
-                self.on_txpool_event().await.context("While processing txpool event")?;
-
should_continue = true; - } _ = next_block_production => { match self.on_timer().await.context("While processing timer event") { Ok(()) => should_continue = true, @@ -587,6 +583,10 @@ where } }; } + _ = self.new_txs_watcher.changed() => { + self.on_txpool_event().await.context("While processing txpool event")?; + should_continue = true; + } } Ok(should_continue) diff --git a/crates/services/consensus_module/poa/src/service_test/trigger_tests.rs b/crates/services/consensus_module/poa/src/service_test/trigger_tests.rs index 21cc4a22a3f..8b403e97fc1 100644 --- a/crates/services/consensus_module/poa/src/service_test/trigger_tests.rs +++ b/crates/services/consensus_module/poa/src/service_test/trigger_tests.rs @@ -478,3 +478,32 @@ async fn interval_trigger_produces_blocks_in_the_future_when_time_rewinds() { // similarly to how it works when time is lagging. assert_eq!(second_block_time, start_time + block_time.as_secs() * 2); } + +#[tokio::test] +async fn interval_trigger_even_if_queued_tx_events() { + let block_time = Duration::from_secs(2); + let mut ctx = DefaultContext::new(Config { + trigger: Trigger::Interval { block_time }, + signer: SignMode::Key(test_signing_key()), + metrics: false, + ..Default::default() + }) + .await; + let block_creation_notifier = Arc::new(Notify::new()); + tokio::task::spawn({ + let notifier = ctx.new_txs_notifier.clone(); + async move { + loop { + time::sleep(Duration::from_nanos(10)).await; + notifier.send_replace(()); + } + } + }); + let block_creation_waiter = block_creation_notifier.clone(); + tokio::task::spawn(async move { + ctx.block_import.recv().await.unwrap(); + dbg!("First block produced"); + block_creation_notifier.notify_waiters(); + }); + block_creation_waiter.notified().await; +} diff --git a/crates/services/importer/src/config.rs b/crates/services/importer/src/config.rs index f959724abb2..d5854d5141f 100644 --- a/crates/services/importer/src/config.rs +++ b/crates/services/importer/src/config.rs @@ -1,18 +1,20 @@ #[derive(Debug, Clone)] pub struct Config { pub max_block_notify_buffer: usize, + pub metrics: bool, } impl Config { - pub fn new() -> Self { + pub fn new(metrics: bool) -> Self { Self { max_block_notify_buffer: 1 << 10, + metrics, } } } impl Default for Config { fn default() -> Self { - Self::new() + Self::new(false) } } diff --git a/crates/services/importer/src/importer.rs b/crates/services/importer/src/importer.rs index d877390cbaa..4479c4efd37 100644 --- a/crates/services/importer/src/importer.rs +++ b/crates/services/importer/src/importer.rs @@ -25,6 +25,10 @@ use fuel_core_types::{ primitives::BlockId, SealedBlock, }, + fuel_tx::{ + field::MintGasPrice, + Transaction, + }, fuel_types::{ BlockHeight, ChainId, @@ -34,8 +38,10 @@ use fuel_core_types::{ ImportResult, UncommittedResult, }, - executor, - executor::ValidationResult, + executor::{ + self, + ValidationResult, + }, Uncommitted, }, }; @@ -58,6 +64,7 @@ use tokio::sync::{ Semaphore, TryAcquireError, }; +use tracing::warn; #[cfg(test)] pub mod test; @@ -126,6 +133,8 @@ pub struct Importer { /// the resolution of the previous one. 
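Returning to the PoA hunk above: the `new_txs_watcher` arm is moved below the block production timer arm. Reordering arms only has a deterministic effect when the enclosing `tokio::select!` polls in declaration order, which is what its `biased;` mode guarantees; the macro header sits outside this hunk, so the snippet below illustrates the general mechanism rather than quoting the fuel-core loop.

// Minimal demonstration of arm priority in a biased select: when both
// futures are ready, the arm written first wins every time.
use std::time::Duration;

#[tokio::main]
async fn main() {
    // Both branches are immediately ready.
    let timer = tokio::time::sleep(Duration::from_millis(0));
    let (tx, mut rx) = tokio::sync::watch::channel(());
    tx.send(()).unwrap();

    tokio::select! {
        biased;

        _ = timer => println!("block production arm polled first"),
        _ = rx.changed() => println!("txpool arm would have starved the timer"),
    }
}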
active_import_results: Arc, process_thread: rayon::ThreadPool, + /// Enables prometheus metrics for this fuel-service + metrics: bool, } impl Importer { @@ -155,6 +164,7 @@ impl Importer { active_import_results: Arc::new(Semaphore::new(max_block_notify_buffer)), guard: Semaphore::new(1), process_thread, + metrics: config.metrics, } } @@ -341,18 +351,9 @@ where db_after_execution.commit()?; - // update the importer metrics after the block is successfully committed - importer_metrics() - .block_height - .set(*actual_next_height.deref() as i64); - let current_time = SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap() - .as_secs_f64(); - importer_metrics() - .latest_block_import_timestamp - .set(current_time); - + if self.metrics { + Self::update_metrics(&result, &actual_next_height); + } tracing::info!("Committed block {:#x}", result.sealed_block.entity.id()); let result = ImporterResult { @@ -391,6 +392,52 @@ where .latest_block_import_timestamp .set(current_time); } + + fn update_metrics(result: &ImportResult, actual_next_height: &BlockHeight) { + let (total_gas_used, total_fee): (u64, u64) = result + .tx_status + .iter() + .map(|tx_result| { + (*tx_result.result.total_gas(), *tx_result.result.total_fee()) + }) + .fold((0_u64, 0_u64), |(acc_gas, acc_fee), (used_gas, fee)| { + ( + acc_gas.saturating_add(used_gas), + acc_fee.saturating_add(fee), + ) + }); + let maybe_last_tx = result.sealed_block.entity.transactions().last(); + if let Some(last_tx) = maybe_last_tx { + if let Transaction::Mint(mint) = last_tx { + importer_metrics() + .gas_price + .set((*mint.gas_price()).try_into().unwrap_or(i64::MAX)); + } else { + warn!("Last transaction is not a mint transaction"); + } + } + + let total_transactions = result.tx_status.len(); + importer_metrics() + .block_height + .set(*actual_next_height.deref() as i64); + let current_time = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs_f64(); + importer_metrics() + .latest_block_import_timestamp + .set(current_time); + importer_metrics() + .gas_per_block + .set(total_gas_used.try_into().unwrap_or(i64::MAX)); + importer_metrics() + .fee_per_block + .set(total_fee.try_into().unwrap_or(i64::MAX)); + importer_metrics() + .transactions_per_block + .set(total_transactions.try_into().unwrap_or(i64::MAX)); + } } impl Importer diff --git a/crates/services/p2p/Cargo.toml b/crates/services/p2p/Cargo.toml index 718e3bbf0d7..5455e83c5f3 100644 --- a/crates/services/p2p/Cargo.toml +++ b/crates/services/p2p/Cargo.toml @@ -36,6 +36,7 @@ libp2p = { version = "0.53.2", default-features = false, features = [ "tokio", "yamux", "websocket", + "metrics", ] } libp2p-mplex = "0.41.0" postcard = { workspace = true, features = ["use-std"] } diff --git a/crates/services/p2p/src/p2p_service.rs b/crates/services/p2p/src/p2p_service.rs index eb5a5a75a6c..024cc006785 100644 --- a/crates/services/p2p/src/p2p_service.rs +++ b/crates/services/p2p/src/p2p_service.rs @@ -36,7 +36,10 @@ use crate::{ }, TryPeerId, }; -use fuel_core_metrics::p2p_metrics::increment_unique_peers; +use fuel_core_metrics::{ + global_registry, + p2p_metrics::increment_unique_peers, +}; use fuel_core_types::{ fuel_types::BlockHeight, services::p2p::peer_reputation::AppScore, @@ -51,6 +54,10 @@ use libp2p::{ TopicHash, }, identify, + metrics::{ + Metrics, + Recorder, + }, multiaddr::Protocol, request_response::{ self, @@ -123,6 +130,9 @@ pub struct FuelP2PService { /// Whether or not metrics collection is enabled metrics: bool, + /// libp2p metrics registry + libp2p_metrics_registry: 
Option, + /// Holds peers' information, and manages existing connections peer_manager: PeerManager, } @@ -203,6 +213,8 @@ impl FuelP2PService { config: Config, codec: PostcardCodec, ) -> anyhow::Result { + let metrics = config.metrics; + let gossipsub_data = GossipsubData::with_topics(GossipsubTopics::new(&config.network_name)); let network_metadata = NetworkMetadata { gossipsub_data }; @@ -217,7 +229,7 @@ impl FuelP2PService { let tcp_config = tcp::Config::new().port_reuse(true); let behaviour = FuelBehaviour::new(&config, codec.clone())?; - let mut swarm = SwarmBuilder::with_existing_identity(config.keypair.clone()) + let swarm_builder = SwarmBuilder::with_existing_identity(config.keypair.clone()) .with_tokio() .with_tcp( tcp_config, @@ -225,21 +237,41 @@ impl FuelP2PService { libp2p::yamux::Config::default, ) .map_err(|_| anyhow::anyhow!("Failed to build Swarm"))? - .with_dns()? - .with_behaviour(|_| behaviour)? - .with_swarm_config(|cfg| { - if let Some(timeout) = config.connection_idle_timeout { - cfg.with_idle_connection_timeout(timeout) - } else { - cfg - } - }) - .build(); + .with_dns()?; + + let mut libp2p_metrics_registry = None; + let mut swarm = if metrics { + // we use the global registry to store the metrics without needing to create a new one + // since libp2p already creates sub-registries + let mut registry = global_registry().registry.lock(); + libp2p_metrics_registry = Some(Metrics::new(&mut registry)); + + swarm_builder + .with_bandwidth_metrics(&mut registry) + .with_behaviour(|_| behaviour)? + .with_swarm_config(|cfg| { + if let Some(timeout) = config.connection_idle_timeout { + cfg.with_idle_connection_timeout(timeout) + } else { + cfg + } + }) + .build() + } else { + swarm_builder + .with_behaviour(|_| behaviour)? + .with_swarm_config(|cfg| { + if let Some(timeout) = config.connection_idle_timeout { + cfg.with_idle_connection_timeout(timeout) + } else { + cfg + } + }) + .build() + }; let local_peer_id = swarm.local_peer_id().to_owned(); - let metrics = config.metrics; - if let Some(public_address) = config.public_address.clone() { swarm.add_external_address(public_address); } @@ -260,6 +292,7 @@ impl FuelP2PService { inbound_requests_table: HashMap::default(), network_metadata, metrics, + libp2p_metrics_registry, peer_manager: PeerManager::new( reserved_peers_updates, reserved_peers, @@ -312,6 +345,15 @@ impl FuelP2PService { } } + pub fn update_libp2p_metrics(&self, event: &E) + where + Metrics: Recorder, + { + if let Some(registry) = self.libp2p_metrics_registry.as_ref() { + self.update_metrics(|| registry.record(event)); + } + } + #[cfg(feature = "test-helpers")] pub fn multiaddrs(&self) -> Vec { let local_peer = self.local_peer_id; @@ -492,7 +534,10 @@ impl FuelP2PService { ); None } - _ => None, + _ => { + self.update_libp2p_metrics(&event); + None + } } } @@ -517,13 +562,23 @@ impl FuelP2PService { event: FuelBehaviourEvent, ) -> Option { match event { - FuelBehaviourEvent::Gossipsub(event) => self.handle_gossipsub_event(event), + FuelBehaviourEvent::Gossipsub(event) => { + self.update_libp2p_metrics(&event); + self.handle_gossipsub_event(event) + } FuelBehaviourEvent::PeerReport(event) => self.handle_peer_report_event(event), FuelBehaviourEvent::RequestResponse(event) => { self.handle_request_response_event(event) } - FuelBehaviourEvent::Identify(event) => self.handle_identify_event(event), + FuelBehaviourEvent::Identify(event) => { + self.update_libp2p_metrics(&event); + self.handle_identify_event(event) + } FuelBehaviourEvent::Heartbeat(event) => 
self.handle_heartbeat_event(event), + FuelBehaviourEvent::Discovery(event) => { + self.update_libp2p_metrics(&event); + None + } _ => None, } } diff --git a/crates/services/src/async_processor.rs b/crates/services/src/async_processor.rs index 6a5b43f395f..db929176483 100644 --- a/crates/services/src/async_processor.rs +++ b/crates/services/src/async_processor.rs @@ -12,6 +12,7 @@ use tokio::{ OwnedSemaphorePermit, Semaphore, }, + task::JoinHandle, }; /// A processor that can execute async tasks with a limit on the number of tasks that can be @@ -76,40 +77,50 @@ impl AsyncProcessor { } /// Spawn a task with a reservation. - pub fn spawn_reserved(&self, reservation: AsyncReservation, future: F) + pub fn spawn_reserved( + &self, + reservation: AsyncReservation, + future: F, + ) -> JoinHandle where - F: Future + Send + 'static, + F: Future + Send + 'static, + F::Output: Send, { let permit = reservation.0; let future = async move { let permit = permit; - future.await; - drop(permit) + let result = future.await; + drop(permit); + result }; let metered_future = MeteredFuture::new(future, self.metric.clone()); if let Some(runtime) = &self.thread_pool { - runtime.spawn(metered_future); + runtime.spawn(metered_future) } else { - tokio::spawn(metered_future); + tokio::spawn(metered_future) } } /// Tries to spawn a task. If the task cannot be spawned, returns an error. - pub fn try_spawn(&self, future: F) -> Result<(), OutOfCapacity> + pub fn try_spawn(&self, future: F) -> Result, OutOfCapacity> where - F: Future + Send + 'static, + F: Future + Send + 'static, + F::Output: Send, { let reservation = self.reserve()?; - self.spawn_reserved(reservation, future); - Ok(()) + Ok(self.spawn_reserved(reservation, future)) } } #[cfg(test)] #[allow(clippy::bool_assert_comparison)] +#[allow(non_snake_case)] mod tests { use super::*; + use futures::future::join_all; use std::{ + collections::HashSet, + iter, thread::sleep, time::Duration, }; @@ -129,11 +140,45 @@ mod tests { }); // Then - assert_eq!(result, Ok(())); + result.expect("Expected Ok result"); sleep(Duration::from_secs(1)); receiver.try_recv().unwrap(); } + #[tokio::test] + async fn one_spawn_single_tasks_works__thread_id_is_different_than_main() { + // Given + let number_of_threads = 10; + let number_of_pending_tasks = 10000; + let heavy_task_processor = + AsyncProcessor::new("Test", number_of_threads, number_of_pending_tasks) + .unwrap(); + let main_handler = tokio::spawn(async move { std::thread::current().id() }); + let main_id = main_handler.await.unwrap(); + + // When + let futures = iter::repeat_with(|| { + heavy_task_processor + .try_spawn(async move { + tokio::time::sleep(Duration::from_secs(1)).await; + std::thread::current().id() + }) + .unwrap() + }) + .take(number_of_pending_tasks) + .collect::>(); + + // Then + let thread_ids = join_all(futures).await; + let unique_thread_ids = thread_ids + .into_iter() + .map(|r| r.unwrap()) + .collect::>(); + + assert!(!unique_thread_ids.contains(&main_id)); + assert_eq!(unique_thread_ids.len(), number_of_threads); + } + #[test] fn second_spawn_fails_when_limit_is_one_and_first_in_progress() { // Given @@ -143,7 +188,7 @@ mod tests { let first_spawn_result = heavy_task_processor.try_spawn(async move { sleep(Duration::from_secs(1)); }); - assert_eq!(first_spawn_result, Ok(())); + first_spawn_result.expect("Expected Ok result"); // When let second_spawn_result = heavy_task_processor.try_spawn(async move { @@ -151,7 +196,8 @@ mod tests { }); // Then - assert_eq!(second_spawn_result, Err(OutOfCapacity)); + 
let err = second_spawn_result.expect_err("Expected Ok result"); + assert_eq!(err, OutOfCapacity); } #[test] @@ -166,7 +212,7 @@ mod tests { sleep(Duration::from_secs(1)); sender.send(()).unwrap(); }); - assert_eq!(first_spawn, Ok(())); + first_spawn.expect("Expected Ok result"); futures::executor::block_on(async move { receiver.await.unwrap(); }); @@ -177,7 +223,7 @@ mod tests { }); // Then - assert_eq!(second_spawn, Ok(())); + second_spawn.expect("Expected Ok result"); } #[test] @@ -194,7 +240,7 @@ mod tests { }); // Then - assert_eq!(result, Ok(())); + result.expect("Expected Ok result"); } } @@ -217,13 +263,15 @@ mod tests { sleep(Duration::from_secs(1)); broadcast_sender.send(()).unwrap(); }); - assert_eq!(result, Ok(())); + result.expect("Expected Ok result"); } drop(broadcast_sender); // Then while broadcast_receiver.recv().await.is_ok() {} assert!(instant.elapsed() >= Duration::from_secs(10)); + // Wait for the metrics to be updated. + tokio::time::sleep(Duration::from_secs(1)).await; let duration = Duration::from_nanos(heavy_task_processor.metric.busy.get()); assert_eq!(duration.as_secs(), 10); let duration = Duration::from_nanos(heavy_task_processor.metric.idle.get()); @@ -249,13 +297,15 @@ mod tests { sleep(Duration::from_secs(1)); broadcast_sender.send(()).unwrap(); }); - assert_eq!(result, Ok(())); + result.expect("Expected Ok result"); } drop(broadcast_sender); // Then while broadcast_receiver.recv().await.is_ok() {} assert!(instant.elapsed() <= Duration::from_secs(2)); + // Wait for the metrics to be updated. + tokio::time::sleep(Duration::from_secs(1)).await; let duration = Duration::from_nanos(heavy_task_processor.metric.busy.get()); assert_eq!(duration.as_secs(), 10); let duration = Duration::from_nanos(heavy_task_processor.metric.idle.get()); @@ -281,13 +331,15 @@ mod tests { tokio::time::sleep(Duration::from_secs(1)).await; broadcast_sender.send(()).unwrap(); }); - assert_eq!(result, Ok(())); + result.expect("Expected Ok result"); } drop(broadcast_sender); // Then while broadcast_receiver.recv().await.is_ok() {} assert!(instant.elapsed() <= Duration::from_secs(2)); + // Wait for the metrics to be updated. + tokio::time::sleep(Duration::from_secs(1)).await; let duration = Duration::from_nanos(heavy_task_processor.metric.busy.get()); assert_eq!(duration.as_secs(), 0); let duration = Duration::from_nanos(heavy_task_processor.metric.idle.get()); diff --git a/crates/services/src/lib.rs b/crates/services/src/lib.rs index 7162389e082..e7fa438631f 100644 --- a/crates/services/src/lib.rs +++ b/crates/services/src/lib.rs @@ -11,6 +11,7 @@ mod state; mod sync; #[cfg(feature = "sync-processor")] mod sync_processor; +pub mod yield_stream; /// Re-exports for streaming utilities pub mod stream { @@ -21,26 +22,37 @@ pub mod stream { Stream, }; - /// A Send + Sync BoxStream + /// A `Send` + `Sync` BoxStream with static lifetime. pub type BoxStream = core::pin::Pin + Send + Sync + 'static>>; + /// A `Send` BoxStream with a lifetime. + pub type RefBoxStream<'a, T> = core::pin::Pin + Send + 'a>>; + /// A Send + Sync BoxFuture pub type BoxFuture<'a, T> = core::pin::Pin + Send + Sync + 'a>>; /// Helper trait to create a BoxStream from a Stream pub trait IntoBoxStream: Stream { - /// Convert this stream into a BoxStream. + /// Convert this stream into a [`BoxStream`]. fn into_boxed(self) -> BoxStream where Self: Sized + Send + Sync + 'static, { Box::pin(self) } + + /// Convert this stream into a [`RefBoxStream`]. 
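On the new `RefBoxStream` alias and the `into_boxed_ref` method defined immediately below: the `messages` resolver earlier in this diff has an `if`/`else` whose branches produce different concrete `Stream` types, and `let` needs a single type. Boxing behind `dyn Stream` unifies the branches, and the `'a` lifetime lets the boxed stream keep borrowing from the read view instead of demanding `'static`. A reduced example of the same pattern:

use futures::{Stream, StreamExt};
use std::pin::Pin;

type RefBoxStream<'a, T> = Pin<Box<dyn Stream<Item = T> + Send + 'a>>;

fn numbers(filter: Option<&u32>) -> RefBoxStream<'_, u32> {
    if let Some(wanted) = filter {
        // `filter` produces one concrete stream type that borrows `wanted`...
        Box::pin(futures::stream::iter(0..10).filter(move |n| {
            let keep = n == wanted;
            async move { keep }
        }))
    } else {
        // ...and the unfiltered stream is a different type; boxing unifies them.
        Box::pin(futures::stream::iter(0..10))
    }
}

#[tokio::main]
async fn main() {
    let all: Vec<u32> = numbers(None).collect().await;
    assert_eq!(all.len(), 10);

    let three = 3;
    let only: Vec<u32> = numbers(Some(&three)).collect().await;
    assert_eq!(only, vec![3]);
}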
+ fn into_boxed_ref<'a>(self) -> RefBoxStream<'a, Self::Item> + where + Self: Sized + Send + 'a, + { + Box::pin(self) + } } - impl IntoBoxStream for S where S: Stream + Send + Sync + 'static {} + impl IntoBoxStream for S where S: Stream + Send {} } /// Helper trait to trace errors diff --git a/crates/services/src/sync_processor.rs b/crates/services/src/sync_processor.rs index 36fb53bc2a6..4e23a68205f 100644 --- a/crates/services/src/sync_processor.rs +++ b/crates/services/src/sync_processor.rs @@ -209,6 +209,8 @@ mod tests { // Then while broadcast_receiver.recv().await.is_ok() {} assert!(instant.elapsed() >= Duration::from_secs(10)); + // Wait for the metrics to be updated. + tokio::time::sleep(Duration::from_secs(1)).await; let duration = Duration::from_nanos(heavy_task_processor.metric.busy.get()); assert_eq!(duration.as_secs(), 10); } @@ -275,6 +277,8 @@ mod tests { // Then while broadcast_receiver.recv().await.is_ok() {} assert!(instant.elapsed() <= Duration::from_secs(2)); + // Wait for the metrics to be updated. + tokio::time::sleep(Duration::from_secs(1)).await; let duration = Duration::from_nanos(heavy_task_processor.metric.busy.get()); assert_eq!(duration.as_secs(), 10); } diff --git a/crates/services/src/yield_stream.rs b/crates/services/src/yield_stream.rs new file mode 100644 index 00000000000..60331dbf119 --- /dev/null +++ b/crates/services/src/yield_stream.rs @@ -0,0 +1,164 @@ +//! Stream that yields each `batch_size` items allowing other tasks to work. + +use futures::{ + ready, + stream::Fuse, + Stream, + StreamExt, +}; +use std::{ + pin::Pin, + task::{ + Context, + Poll, + }, +}; + +pin_project_lite::pin_project! { + /// Stream that yields each `batch_size` items. + #[derive(Debug)] + #[must_use = "streams do nothing unless polled"] + pub struct YieldStream { + #[pin] + stream: Fuse, + item: Option, + counter: usize, + batch_size: usize, + } +} + +impl YieldStream { + /// Create a new `YieldStream` with the given `batch_size`. + pub fn new(stream: St, batch_size: usize) -> Self { + assert!(batch_size > 0); + + Self { + stream: stream.fuse(), + item: None, + counter: 0, + batch_size, + } + } +} + +impl Stream for YieldStream { + type Item = St::Item; + + fn poll_next( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + let mut this = self.as_mut().project(); + + // If we have a cached item, return it because that means we were woken up. + if let Some(item) = this.item.take() { + *this.counter = 1; + return Poll::Ready(Some(item)); + } + + match ready!(this.stream.as_mut().poll_next(cx)) { + // Return items, unless we reached the batch size. + // after that, we want to yield before returning the next item. + Some(item) => { + if this.counter < this.batch_size { + *this.counter = this.counter.saturating_add(1); + + Poll::Ready(Some(item)) + } else { + *this.item = Some(item); + + cx.waker().wake_by_ref(); + + Poll::Pending + } + } + + // Underlying stream ran out of values, so finish this stream as well. + None => Poll::Ready(None), + } + } + + fn size_hint(&self) -> (usize, Option) { + let cached_len = usize::from(self.item.is_some()); + let (lower, upper) = self.stream.size_hint(); + let lower = lower.saturating_add(cached_len); + let upper = match upper { + Some(x) => x.checked_add(cached_len), + None => None, + }; + (lower, upper) + } +} + +/// Extension trait for `Stream`. +pub trait StreamYieldExt: Stream { + /// Yields each `batch_size` items allowing other tasks to work. 
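The `poll_next` above (and the `yield_each` helper defined just after this note) rests on one idiom worth isolating: once a batch is exhausted, the stream stashes the next item, calls `cx.waker().wake_by_ref()`, and returns `Poll::Pending`. Waking your own waker re-queues the task, so other tasks on the executor get to run before the stream resumes. The same trick in its smallest form, equivalent in spirit to `tokio::task::yield_now`:

use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

struct YieldNow {
    yielded: bool,
}

impl Future for YieldNow {
    type Output = ();

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
        if self.yielded {
            Poll::Ready(())
        } else {
            self.yielded = true;
            // Tell the executor we are runnable again, then give up the slot.
            cx.waker().wake_by_ref();
            Poll::Pending
        }
    }
}

#[tokio::main]
async fn main() {
    YieldNow { yielded: false }.await;
    println!("resumed after yielding once");
}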
+ fn yield_each(self, batch_size: usize) -> YieldStream + where + Self: Sized, + { + YieldStream::new(self, batch_size) + } +} + +impl StreamYieldExt for St where St: Stream {} + +#[cfg(test)] +#[allow(non_snake_case)] +mod tests { + use super::*; + + #[tokio::test] + async fn yield_stream__works_with_10_elements_loop() { + let stream = futures::stream::iter(0..10); + let mut yield_stream = YieldStream::new(stream, 3); + + let mut items = Vec::new(); + while let Some(item) = yield_stream.next().await { + items.push(item); + } + + assert_eq!(items, vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); + } + + #[tokio::test] + async fn yield_stream__works_with_10_elements__collect() { + let stream = futures::stream::iter(0..10); + let yield_stream = stream.yield_each(3); + + let items = yield_stream.collect::>().await; + + assert_eq!(items, vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); + } + + #[tokio::test] + async fn yield_stream__passed_control_to_another_future() { + let stream = futures::stream::iter(0..10); + let mut yield_stream = YieldStream::new(stream, 3); + + async fn second_future() -> i32 { + -1 + } + + let mut items = Vec::new(); + loop { + tokio::select! { + biased; + + item = yield_stream.next() => { + if let Some(item) = item { + items.push(item); + } else { + break; + } + } + + item = second_future() => { + items.push(item); + } + } + } + + assert_eq!(items, vec![0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, -1, 9]); + } +} diff --git a/crates/services/upgradable-executor/src/executor.rs b/crates/services/upgradable-executor/src/executor.rs index fedecf28d49..38ebabe0283 100644 --- a/crates/services/upgradable-executor/src/executor.rs +++ b/crates/services/upgradable-executor/src/executor.rs @@ -154,7 +154,8 @@ impl Executor { ("0-37-0", 12), ("0-37-1", 13), ("0-38-0", 14), - ("0-39-0", LATEST_STATE_TRANSITION_VERSION), + ("0-39-0", 15), + ("0-40-0", LATEST_STATE_TRANSITION_VERSION), ]; pub fn new( diff --git a/crates/storage/src/iter.rs b/crates/storage/src/iter.rs index 37c38463cc8..35c550851fb 100644 --- a/crates/storage/src/iter.rs +++ b/crates/storage/src/iter.rs @@ -29,7 +29,7 @@ pub mod changes_iterator; // TODO: BoxedIter to be used until RPITIT lands in stable rust. /// A boxed variant of the iterator that can be used as a return type of the traits. pub struct BoxedIter<'a, T> { - iter: Box + 'a>, + iter: Box + 'a + Send>, } impl<'a, T> Iterator for BoxedIter<'a, T> { @@ -48,7 +48,7 @@ pub trait IntoBoxedIter<'a, T> { impl<'a, T, I> IntoBoxedIter<'a, T> for I where - I: Iterator + 'a, + I: Iterator + 'a + Send, { fn into_boxed(self) -> BoxedIter<'a, T> { BoxedIter { @@ -346,7 +346,10 @@ pub fn iterator<'a, V>( prefix: Option<&[u8]>, start: Option<&[u8]>, direction: IterDirection, -) -> impl Iterator + 'a { +) -> impl Iterator + 'a +where + V: Send + Sync, +{ match (prefix, start) { (None, None) => { if direction == IterDirection::Forward { @@ -401,7 +404,10 @@ pub fn keys_iterator<'a, V>( prefix: Option<&[u8]>, start: Option<&[u8]>, direction: IterDirection, -) -> impl Iterator + 'a { +) -> impl Iterator + 'a +where + V: Send + Sync, +{ match (prefix, start) { (None, None) => { if direction == IterDirection::Forward { diff --git a/crates/storage/src/kv_store.rs b/crates/storage/src/kv_store.rs index e9d4eb22a52..67ab9493c7f 100644 --- a/crates/storage/src/kv_store.rs +++ b/crates/storage/src/kv_store.rs @@ -17,13 +17,8 @@ use core::ops::Deref; /// The key of the storage. pub type Key = Vec; -#[cfg(feature = "std")] /// The value of the storage. 
It is wrapped into the `Arc` to provide less cloning of massive objects. -pub type Value = std::sync::Arc>; - -#[cfg(not(feature = "std"))] -/// The value of the storage. It is wrapped into the `Rc` to provide less cloning of massive objects. -pub type Value = alloc::rc::Rc>; +pub type Value = alloc::sync::Arc>; /// The pair of key and value from the storage. pub type KVItem = StorageResult<(Key, Value)>; diff --git a/crates/types/src/blockchain/header.rs b/crates/types/src/blockchain/header.rs index 951435ea56b..65aba0f1399 100644 --- a/crates/types/src/blockchain/header.rs +++ b/crates/types/src/blockchain/header.rs @@ -166,7 +166,7 @@ pub type ConsensusParametersVersion = u32; pub type StateTransitionBytecodeVersion = u32; /// The latest version of the state transition bytecode. -pub const LATEST_STATE_TRANSITION_VERSION: StateTransitionBytecodeVersion = 15; +pub const LATEST_STATE_TRANSITION_VERSION: StateTransitionBytecodeVersion = 16; #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] diff --git a/crates/types/src/services/executor.rs b/crates/types/src/services/executor.rs index b8c0dfda407..357e3697638 100644 --- a/crates/types/src/services/executor.rs +++ b/crates/types/src/services/executor.rs @@ -232,6 +232,14 @@ impl TransactionExecutionResult { } } + /// Get the total fee paid by the transaction. + pub fn total_fee(&self) -> &u64 { + match self { + TransactionExecutionResult::Success { total_fee, .. } + | TransactionExecutionResult::Failed { total_fee, .. } => total_fee, + } + } + #[cfg(feature = "std")] /// Get the reason of the failed transaction execution. pub fn reason(receipts: &[Receipt], state: &Option) -> String { diff --git a/tests/test-helpers/src/builder.rs b/tests/test-helpers/src/builder.rs index 91134ceb7c3..0d16bc48dc6 100644 --- a/tests/test-helpers/src/builder.rs +++ b/tests/test-helpers/src/builder.rs @@ -93,6 +93,7 @@ pub struct TestSetupBuilder { pub initial_coins: Vec, pub starting_gas_price: u64, pub gas_limit: Option, + pub block_size_limit: Option, pub starting_block: Option, pub utxo_validation: bool, pub privileged_address: Address, @@ -201,6 +202,13 @@ impl TestSetupBuilder { .set_block_gas_limit(gas_limit); } + if let Some(block_size_limit) = self.block_size_limit { + chain_conf + .consensus_parameters + .set_block_transaction_size_limit(block_size_limit) + .expect("Should set new block size limit"); + } + chain_conf .consensus_parameters .set_privileged_address(self.privileged_address); @@ -251,6 +259,7 @@ impl Default for TestSetupBuilder { initial_coins: vec![], starting_gas_price: 0, gas_limit: None, + block_size_limit: None, starting_block: None, utxo_validation: true, privileged_address: Default::default(), diff --git a/tests/tests/dos.rs b/tests/tests/dos.rs index b9067738a5d..05c0e0fc030 100644 --- a/tests/tests/dos.rs +++ b/tests/tests/dos.rs @@ -1,6 +1,9 @@ #![allow(non_snake_case)] -use std::time::Instant; +use std::time::{ + Duration, + Instant, +}; use fuel_core::service::{ Config, @@ -234,7 +237,7 @@ async fn complex_queries__40_full_blocks__works() { } #[tokio::test] -async fn complex_queries__41_full_block__query_to_complex() { +async fn complex_queries__41_full_block__query_too_complex() { let query = FULL_BLOCK_QUERY.to_string(); let query = query.replace("$NUMBER_OF_BLOCKS", "41"); @@ -245,6 +248,21 @@ async fn complex_queries__41_full_block__query_to_complex() { assert!(result.contains("Query is too complex.")); } +#[tokio::test] +async fn 
complex_queries__increased_block_header_cost__failed_to_initialize_service() { + let mut config = Config::local_node(); + config.graphql_config.costs.block_header = + config.graphql_config.max_queries_complexity; + + let Err(error) = FuelService::new_node(config).await else { + panic!("expected error"); + }; + + assert!(error + .to_string() + .contains("cannot initialize queries with non-default costs in tests")) +} + #[tokio::test] async fn complex_queries__100_block_headers__works() { let query = r#" @@ -666,3 +684,40 @@ async fn schema_is_retrievable() { let result = send_graph_ql_query(&url, query).await; assert!(result.contains("__schema"), "{:?}", result); } + +#[tokio::test(flavor = "multi_thread", worker_threads = 8)] +async fn heavy_tasks_doesnt_block_graphql() { + let mut config = Config::local_node(); + + const NUM_OF_BLOCKS: u32 = 4000; + config.graphql_config.max_queries_complexity = 10_000_000; + + let query = FULL_BLOCK_QUERY.to_string(); + let query = query.replace("$NUMBER_OF_BLOCKS", NUM_OF_BLOCKS.to_string().as_str()); + + let node = FuelService::new_node(config).await.unwrap(); + let url = format!("http://{}/v1/graphql", node.bound_address); + let client = FuelClient::new(url.clone()).unwrap(); + client.produce_blocks(NUM_OF_BLOCKS, None).await.unwrap(); + + // Given + for _ in 0..50 { + let url = url.clone(); + let query = query.clone(); + tokio::spawn(async move { + tokio::time::sleep(Duration::from_millis(20)).await; + let result = send_graph_ql_query(&url, &query).await; + assert!(result.contains("transactions")); + }); + } + // Wait for all queries to start be processed on the node. + tokio::time::sleep(Duration::from_secs(1)).await; + + // When + let result = tokio::time::timeout(Duration::from_secs(5), client.health()).await; + + // Then + let result = result.expect("Health check timed out"); + let health = result.expect("Health check failed"); + assert!(health); +} diff --git a/tests/tests/poa.rs b/tests/tests/poa.rs index fca05616275..792064fbade 100644 --- a/tests/tests/poa.rs +++ b/tests/tests/poa.rs @@ -259,6 +259,7 @@ mod p2p { // Then starts second_producer that uses the first one as a reserved peer. // second_producer should not produce blocks while the first one is producing // after the first_producer stops, second_producer should start producing blocks + #[ignore = "seems to be flaky, issue: https://github.com/FuelLabs/fuel-core/issues/2351"] #[tokio::test(flavor = "multi_thread")] async fn test_poa_multiple_producers() { const SYNC_TIMEOUT: u64 = 30;
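The `heavy_tasks_doesnt_block_graphql` test above encodes a reusable liveness pattern: generate background load, then require an unrelated probe to complete within a deadline via `tokio::time::timeout`. A generic skeleton of that pattern, with `check_health` as a hypothetical placeholder for the real `client.health()` call:

use std::time::Duration;

async fn check_health() -> Result<bool, &'static str> {
    // Placeholder for a real endpoint call.
    Ok(true)
}

#[tokio::main]
async fn main() {
    // Given: busy background tasks standing in for heavy GraphQL queries.
    for _ in 0..50 {
        tokio::spawn(async {
            tokio::time::sleep(Duration::from_millis(100)).await;
        });
    }

    // When: the health probe races a 5 second deadline.
    let result = tokio::time::timeout(Duration::from_secs(5), check_health()).await;

    // Then: the deadline did not fire and the probe succeeded.
    let health = result.expect("health check timed out").expect("health check failed");
    assert!(health);
}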